diff --git a/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..307c91f228c09249be0b5f006616419c7962d14e --- /dev/null +++ b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12d3aaf19635df34c0d00d013d2be4625b14e08027937b94a97e91a9d03a08bd +size 33555627 diff --git a/ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..64c763c7a387b257bbe74757cecc62815ff34988 --- /dev/null +++ b/ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56c3e5a58bd448987d60457944256d02648c7eb8c8d1b942da6731e1c929c994 +size 33555533 diff --git a/ckpts/universal/global_step80/zero/24.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/24.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9e00c3165b021680caa64fcfe375cda73269e213 --- /dev/null +++ b/ckpts/universal/global_step80/zero/24.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d0ea9e34122f1c7124b679892b05c4c6610f2c20aaa7d5e2751c11eda885125 +size 50332843 diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4362b49151d7b34ef83b3067a8f9c9f877d72a0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/LICENSE @@ -0,0 +1,502 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. 
+ + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. 
+ + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) 
Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. 
You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. 
If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3ec4d797a2de96620acc6c38a67ea3e819bf5f46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/METADATA @@ -0,0 +1,97 @@ +Metadata-Version: 2.1 +Name: chardet +Version: 5.2.0 +Summary: Universal encoding detector for Python 3 +Home-page: https://github.com/chardet/chardet +Author: Mark Pilgrim +Author-email: mark@diveintomark.org +Maintainer: Daniel Blanchard +Maintainer-email: dan.blanchard@gmail.com +License: LGPL +Project-URL: Documentation, https://chardet.readthedocs.io/ +Project-URL: GitHub Project, https://github.com/chardet/chardet +Project-URL: Issue Tracker, https://github.com/chardet/chardet/issues +Keywords: encoding,i18n,xml +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Linguistic +Requires-Python: >=3.7 +License-File: LICENSE + +Chardet: The Universal Character Encoding Detector +-------------------------------------------------- + +.. image:: https://img.shields.io/travis/chardet/chardet/stable.svg + :alt: Build status + :target: https://travis-ci.org/chardet/chardet + +.. 
image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg + :target: https://coveralls.io/r/chardet/chardet + +.. image:: https://img.shields.io/pypi/v/chardet.svg + :target: https://warehouse.python.org/project/chardet/ + :alt: Latest version on PyPI + +.. image:: https://img.shields.io/pypi/l/chardet.svg + :alt: License + + +Detects + - ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants) + - Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese) + - EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese) + - EUC-KR, ISO-2022-KR, Johab (Korean) + - KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic) + - ISO-8859-5, windows-1251 (Bulgarian) + - ISO-8859-1, windows-1252, MacRoman (Western European languages) + - ISO-8859-7, windows-1253 (Greek) + - ISO-8859-8, windows-1255 (Visual and Logical Hebrew) + - TIS-620 (Thai) + +.. note:: + Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily + disabled until we can retrain the models. + +Requires Python 3.7+. + +Installation +------------ + +Install from `PyPI <https://pypi.org/project/chardet/>`_:: + + pip install chardet + +Documentation +------------- + +For users, docs are now available at https://chardet.readthedocs.io/. + +Command-line Tool +----------------- + +chardet comes with a command-line script which reports on the encodings of one +or more files:: + + % chardetect somefile someotherfile + somefile: windows-1252 with confidence 0.5 + someotherfile: ascii with confidence 1.0 + +About +----- + +This is a continuation of Mark Pilgrim's excellent original chardet port from C, and `Ian Cordasco <https://github.com/sigmavirus24>`_'s +`charade <https://github.com/sigmavirus24/charade>`_ Python 3-compatible fork. + +:maintainer: Dan Blanchard diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7d5d2b02c1b01a59200b8cab857d6e7a38fff3c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/RECORD @@ -0,0 +1,107 @@ +../../../bin/chardetect,sha256=_kiNDcCkZV9nIa4bKK1rfjfgbUvQBc5-VGUKJHh7Y2c,250 +chardet-5.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +chardet-5.2.0.dist-info/LICENSE,sha256=3GJlINzVOiL3J68-5Cx3DlbJemT-OtsGN5nYqwMv5VE,26530 +chardet-5.2.0.dist-info/METADATA,sha256=PAr2NQ6hQWpjyFnwlI7MoxHt2S_6oRiUsucOKMNhzGw,3418 +chardet-5.2.0.dist-info/RECORD,, +chardet-5.2.0.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92 +chardet-5.2.0.dist-info/entry_points.txt,sha256=_cdvYc4jyY68GYfsQAAthNMxO-yodcGkvNC1xOEsLmI,59 +chardet-5.2.0.dist-info/top_level.txt,sha256=AowzBbZy4x8EirABDdJSLJZMkJ_53iIag8xfKR6D7kI,8 +chardet/__init__.py,sha256=57R-HSxj0PWmILMN0GFmUNqEMfrEVSamXyjD-W6_fbs,4797 +chardet/__main__.py,sha256=puNj2o_QfBRKElEkiVp1zEIL1gGYD2o-JuXLFlqHDC4,123 +chardet/__pycache__/__init__.cpython-310.pyc,, +chardet/__pycache__/__main__.cpython-310.pyc,, +chardet/__pycache__/big5freq.cpython-310.pyc,, +chardet/__pycache__/big5prober.cpython-310.pyc,, +chardet/__pycache__/chardistribution.cpython-310.pyc,, +chardet/__pycache__/charsetgroupprober.cpython-310.pyc,, +chardet/__pycache__/charsetprober.cpython-310.pyc,, +chardet/__pycache__/codingstatemachine.cpython-310.pyc,, +chardet/__pycache__/codingstatemachinedict.cpython-310.pyc,, +chardet/__pycache__/cp949prober.cpython-310.pyc,, +chardet/__pycache__/enums.cpython-310.pyc,, +chardet/__pycache__/escprober.cpython-310.pyc,, +chardet/__pycache__/escsm.cpython-310.pyc,, 
+chardet/__pycache__/eucjpprober.cpython-310.pyc,, +chardet/__pycache__/euckrfreq.cpython-310.pyc,, +chardet/__pycache__/euckrprober.cpython-310.pyc,, +chardet/__pycache__/euctwfreq.cpython-310.pyc,, +chardet/__pycache__/euctwprober.cpython-310.pyc,, +chardet/__pycache__/gb2312freq.cpython-310.pyc,, +chardet/__pycache__/gb2312prober.cpython-310.pyc,, +chardet/__pycache__/hebrewprober.cpython-310.pyc,, +chardet/__pycache__/jisfreq.cpython-310.pyc,, +chardet/__pycache__/johabfreq.cpython-310.pyc,, +chardet/__pycache__/johabprober.cpython-310.pyc,, +chardet/__pycache__/jpcntx.cpython-310.pyc,, +chardet/__pycache__/langbulgarianmodel.cpython-310.pyc,, +chardet/__pycache__/langgreekmodel.cpython-310.pyc,, +chardet/__pycache__/langhebrewmodel.cpython-310.pyc,, +chardet/__pycache__/langhungarianmodel.cpython-310.pyc,, +chardet/__pycache__/langrussianmodel.cpython-310.pyc,, +chardet/__pycache__/langthaimodel.cpython-310.pyc,, +chardet/__pycache__/langturkishmodel.cpython-310.pyc,, +chardet/__pycache__/latin1prober.cpython-310.pyc,, +chardet/__pycache__/macromanprober.cpython-310.pyc,, +chardet/__pycache__/mbcharsetprober.cpython-310.pyc,, +chardet/__pycache__/mbcsgroupprober.cpython-310.pyc,, +chardet/__pycache__/mbcssm.cpython-310.pyc,, +chardet/__pycache__/resultdict.cpython-310.pyc,, +chardet/__pycache__/sbcharsetprober.cpython-310.pyc,, +chardet/__pycache__/sbcsgroupprober.cpython-310.pyc,, +chardet/__pycache__/sjisprober.cpython-310.pyc,, +chardet/__pycache__/universaldetector.cpython-310.pyc,, +chardet/__pycache__/utf1632prober.cpython-310.pyc,, +chardet/__pycache__/utf8prober.cpython-310.pyc,, +chardet/__pycache__/version.cpython-310.pyc,, +chardet/big5freq.py,sha256=ltcfP-3PjlNHCoo5e4a7C4z-2DhBTXRfY6jbMbB7P30,31274 +chardet/big5prober.py,sha256=lPMfwCX6v2AaPgvFh_cSWZcgLDbWiFCHLZ_p9RQ9uxE,1763 +chardet/chardistribution.py,sha256=13B8XUG4oXDuLdXvfbIWwLFeR-ZU21AqTS1zcdON8bU,10032 +chardet/charsetgroupprober.py,sha256=UKK3SaIZB2PCdKSIS0gnvMtLR9JJX62M-fZJu3OlWyg,3915 +chardet/charsetprober.py,sha256=L3t8_wIOov8em-vZWOcbkdsrwe43N6_gqNh5pH7WPd4,5420 +chardet/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +chardet/cli/__pycache__/__init__.cpython-310.pyc,, +chardet/cli/__pycache__/chardetect.cpython-310.pyc,, +chardet/cli/chardetect.py,sha256=zibMVg5RpKb-ME9_7EYG4ZM2Sf07NHcQzZ12U-rYJho,3242 +chardet/codingstatemachine.py,sha256=K7k69sw3jY5DmTXoSJQVsUtFIQKYPQVOSJJhBuGv_yE,3732 +chardet/codingstatemachinedict.py,sha256=0GY3Hi2qIZvDrOOJ3AtqppM1RsYxr_66ER4EHjuMiMc,542 +chardet/cp949prober.py,sha256=0jKRV7fECuWI16rNnks0ZECKA1iZYCIEaP8A1ZvjUSI,1860 +chardet/enums.py,sha256=TzECiZoCKNMqgwU76cPCeKWFBqaWvAdLMev5_bCkhY8,1683 +chardet/escprober.py,sha256=Kho48X65xE0scFylIdeJjM2bcbvRvv0h0WUbMWrJD3A,4006 +chardet/escsm.py,sha256=AqyXpA2FQFD7k-buBty_7itGEYkhmVa8X09NLRul3QM,12176 +chardet/eucjpprober.py,sha256=5KYaM9fsxkRYzw1b5k0fL-j_-ezIw-ij9r97a9MHxLY,3934 +chardet/euckrfreq.py,sha256=3mHuRvXfsq_QcQysDQFb8qSudvTiol71C6Ic2w57tKM,13566 +chardet/euckrprober.py,sha256=hiFT6wM174GIwRvqDsIcuOc-dDsq2uPKMKbyV8-1Xnc,1753 +chardet/euctwfreq.py,sha256=2alILE1Lh5eqiFJZjzRkMQXolNJRHY5oBQd-vmZYFFM,36913 +chardet/euctwprober.py,sha256=NxbpNdBtU0VFI0bKfGfDkpP7S2_8_6FlO87dVH0ogws,1753 +chardet/gb2312freq.py,sha256=49OrdXzD-HXqwavkqjo8Z7gvs58hONNzDhAyMENNkvY,20735 +chardet/gb2312prober.py,sha256=KPEBueaSLSvBpFeINMu0D6TgHcR90e5PaQawifzF4o0,1759 +chardet/hebrewprober.py,sha256=96T_Lj_OmW-fK7JrSHojYjyG3fsGgbzkoTNleZ3kfYE,14537 +chardet/jisfreq.py,sha256=mm8tfrwqhpOd3wzZKS4NJqkYBQVcDfTM2JiQ5aW932E,25796 
+chardet/johabfreq.py,sha256=dBpOYG34GRX6SL8k_LbS9rxZPMjLjoMlgZ03Pz5Hmqc,42498 +chardet/johabprober.py,sha256=O1Qw9nVzRnun7vZp4UZM7wvJSv9W941mEU9uDMnY3DU,1752 +chardet/jpcntx.py,sha256=uhHrYWkLxE_rF5OkHKInm0HUsrjgKHHVQvtt3UcvotA,27055 +chardet/langbulgarianmodel.py,sha256=bGoRpxBYtrbSHa6mX6PkEA26v30pWmhDjemhdxmkew8,104550 +chardet/langgreekmodel.py,sha256=3wMlEzQ8oU2MbrL2xN8lkuOB0dCMLBhW6heekxusoc0,98472 +chardet/langhebrewmodel.py,sha256=ZUTqusxMvR_earWPs5w-rH10xoe5sPjd9FLMu1DUIvE,98184 +chardet/langhungarianmodel.py,sha256=N-YtC2EiswyS7XsUicCPRycrIzRNj47Y048odp9qOoo,101351 +chardet/langrussianmodel.py,sha256=6v7RcZKGj0VH0864BHzizKNceAYbHvGts2p00ifC7w4,128023 +chardet/langthaimodel.py,sha256=Mr673U9U8rkQFfUDtLP01pp-0TOsl2o6sb75YEjvpcs,102762 +chardet/langturkishmodel.py,sha256=LkXCjWhGUEzqKXvfasHN0SFBigwKJ3xeWNVZ0EyI0kA,95360 +chardet/latin1prober.py,sha256=p15EEmFbmQUwbKLC7lOJVGHEZwcG45ubEZYTGu01J5g,5380 +chardet/macromanprober.py,sha256=9anfzmY6TBfUPDyBDOdY07kqmTHpZ1tK0jL-p1JWcOY,6077 +chardet/mbcharsetprober.py,sha256=Wr04WNI4F3X_VxEverNG-H25g7u-MDDKlNt-JGj-_uU,3715 +chardet/mbcsgroupprober.py,sha256=iRpaNBjV0DNwYPu_z6TiHgRpwYahiM7ztI_4kZ4Uz9A,2131 +chardet/mbcssm.py,sha256=hUtPvDYgWDaA2dWdgLsshbwRfm3Q5YRlRogdmeRUNQw,30391 +chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +chardet/metadata/__pycache__/__init__.cpython-310.pyc,, +chardet/metadata/__pycache__/languages.cpython-310.pyc,, +chardet/metadata/languages.py,sha256=FhvBIdZFxRQ-dTwkb_0madRKgVBCaUMQz9I5xqjE5iQ,13560 +chardet/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +chardet/resultdict.py,sha256=ez4FRvN5KaSosJeJ2WzUyKdDdg35HDy_SSLPXKCdt5M,402 +chardet/sbcharsetprober.py,sha256=-nd3F90i7GpXLjehLVHqVBE0KlWzGvQUPETLBNn4o6U,6400 +chardet/sbcsgroupprober.py,sha256=gcgI0fOfgw_3YTClpbra_MNxwyEyJ3eUXraoLHYb59E,4137 +chardet/sjisprober.py,sha256=aqQufMzRw46ZpFlzmYaYeT2-nzmKb-hmcrApppJ862k,4007 +chardet/universaldetector.py,sha256=xYBrg4x0dd9WnT8qclfADVD9ondrUNkqPmvte1pa520,14848 +chardet/utf1632prober.py,sha256=pw1epGdMj1hDGiCu1AHqqzOEfjX8MVdiW7O1BlT8-eQ,8505 +chardet/utf8prober.py,sha256=8m08Ub5490H4jQ6LYXvFysGtgKoKsHUd2zH_i8_TnVw,2812 +chardet/version.py,sha256=jp8ePp1zC63YxruGcHSuKxtf3-fF1LYAMUZD2eDWYok,244 diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d272f6ed555bf206d2a9572524bfa3c0b500fe8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..c36a5e3ee479856d913cae1521b4bd2122587325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +chardetect = chardet.cli.chardetect:main diff --git a/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..79236f25cda563eb57cd7b5838256d9f3fdf18ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/chardet-5.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +chardet diff --git 
a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e4d85da5facc67952bfdc29d8a193924cd6b7212 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/RECORD @@ -0,0 +1,76 @@ +../../../bin/hjson,sha256=IEtY2FUtkPVgLc7HKm3HJFfMlQs7Hq0B_f5tznSZI4Q,238 +hjson-3.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +hjson-3.1.0.dist-info/LICENSE.txt,sha256=WRxR9JxoM7zhpdU-Y3haBGet1nRaWogOTRD05kzETEw,10462 +hjson-3.1.0.dist-info/METADATA,sha256=ws3nEqUXFpWNgmH-FYScGumUSgt2dkiPHNm7RZh-gcU,2610 +hjson-3.1.0.dist-info/RECORD,, +hjson-3.1.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +hjson-3.1.0.dist-info/entry_points.txt,sha256=sghBTcTrqdK0kKuznOZXeEBtjoBnLweHF83S9NK-r0E,42 +hjson-3.1.0.dist-info/top_level.txt,sha256=earxK7niVoW6ADlKRFHCs3JpUL4FBSnJn9qmrYgnEUE,6 +hjson/__init__.py,sha256=Py1drAVt-Xpi2xJAHxVXDdX-NniC4llxZVPjY7MyPCY,26580 +hjson/__pycache__/__init__.cpython-310.pyc,, +hjson/__pycache__/compat.cpython-310.pyc,, +hjson/__pycache__/decoder.cpython-310.pyc,, +hjson/__pycache__/encoder.cpython-310.pyc,, +hjson/__pycache__/encoderH.cpython-310.pyc,, +hjson/__pycache__/ordered_dict.cpython-310.pyc,, +hjson/__pycache__/scanner.cpython-310.pyc,, +hjson/__pycache__/tool.cpython-310.pyc,, +hjson/compat.py,sha256=uvmTYe1Oa18tT_6tFRtYnzTdAkyd73B9zwMF7ZafI04,1036 +hjson/decoder.py,sha256=oHz7g2sQd4S-AJbSzur9VJWqAHDWL25FVQ-G35XlGCA,19563 +hjson/encoder.py,sha256=NhR3YSMVzL3UP8OLAtP2Dr6dW9UbJzjVS-SXp1DzvcY,19168 +hjson/encoderH.py,sha256=wJ8D0gAyR3n6e3MahCIU3OahI5_xuHPWV_ZlI839xCs,20481 +hjson/ordered_dict.py,sha256=DXtgiqkkaNWXDLZ0DGXIjF_CPzGV5qpC-PSeS1zcps8,3370 +hjson/scanner.py,sha256=IL8poQGvCsb82y7qY5jrlSrZ5xcENpPUti3tNKhprYw,1779 +hjson/tests/__init__.py,sha256=_A-1Tn7q7ccNPro_QfbKiXo_bTL9ED5RUX9AeSLG4TA,2011 +hjson/tests/__pycache__/__init__.cpython-310.pyc,, +hjson/tests/__pycache__/test_bigint_as_string.cpython-310.pyc,, +hjson/tests/__pycache__/test_bitsize_int_as_string.cpython-310.pyc,, +hjson/tests/__pycache__/test_check_circular.cpython-310.pyc,, +hjson/tests/__pycache__/test_decimal.cpython-310.pyc,, +hjson/tests/__pycache__/test_decode.cpython-310.pyc,, +hjson/tests/__pycache__/test_default.cpython-310.pyc,, +hjson/tests/__pycache__/test_dump.cpython-310.pyc,, +hjson/tests/__pycache__/test_encode_basestring_ascii.cpython-310.pyc,, +hjson/tests/__pycache__/test_errors.cpython-310.pyc,, +hjson/tests/__pycache__/test_fail.cpython-310.pyc,, +hjson/tests/__pycache__/test_float.cpython-310.pyc,, +hjson/tests/__pycache__/test_for_json.cpython-310.pyc,, +hjson/tests/__pycache__/test_hjson.cpython-310.pyc,, +hjson/tests/__pycache__/test_indent.cpython-310.pyc,, +hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc,, +hjson/tests/__pycache__/test_namedtuple.cpython-310.pyc,, +hjson/tests/__pycache__/test_pass1.cpython-310.pyc,, +hjson/tests/__pycache__/test_pass2.cpython-310.pyc,, +hjson/tests/__pycache__/test_pass3.cpython-310.pyc,, +hjson/tests/__pycache__/test_recursion.cpython-310.pyc,, +hjson/tests/__pycache__/test_scanstring.cpython-310.pyc,, +hjson/tests/__pycache__/test_separators.cpython-310.pyc,, +hjson/tests/__pycache__/test_tool.cpython-310.pyc,, +hjson/tests/__pycache__/test_tuple.cpython-310.pyc,, +hjson/tests/__pycache__/test_unicode.cpython-310.pyc,, +hjson/tests/test_bigint_as_string.py,sha256=bhEtYEXWUhxyi25iLm4sPWFrt5RZ9PfFEknX1cdzP-Y,2265 
+hjson/tests/test_bitsize_int_as_string.py,sha256=-73xJ8CE2hDjGOWTERRrYbDHaY0kgBNQXC0g8nIgd4k,2332 +hjson/tests/test_check_circular.py,sha256=64kZhsab6OcwYmJNLRqNW-19dp1UdgYbZiGzopKyR9s,940 +hjson/tests/test_decimal.py,sha256=Qw0IBPSPYoGZXwvXkkM1cz6lpqjkPRzNDBSul-RdR_4,2556 +hjson/tests/test_decode.py,sha256=Sm4052xVjv7ZtZFdRVMsnvQeh2eCNoXv24YOUJJLMdg,4437 +hjson/tests/test_default.py,sha256=WWDLhDVfih4PrenmiEcvshhUOl_bNsm3jML96-AtGmo,224 +hjson/tests/test_dump.py,sha256=5WU4Rd6vsHOwXGpGqQKIw1ZBNgRWUqMY8w3DnJVWfxo,5061 +hjson/tests/test_encode_basestring_ascii.py,sha256=up4y9JMdGXdBXkEjfqwiG-sudSdcKw0RQfO_76za-To,2102 +hjson/tests/test_errors.py,sha256=vg3-z36T9O-UeDHG4ZtW-nQBNAvraWKBrDA70yG989c,1549 +hjson/tests/test_fail.py,sha256=Giinb944NX0bPwBHYUjVZ4ZlNB611Wg0wxVWxv4bDaU,5688 +hjson/tests/test_float.py,sha256=LCUL-2xT8PYq99jQi6-Ddk9pMuC1mLrcJboTfvR08HM,1011 +hjson/tests/test_for_json.py,sha256=ZLtypdX0ALctxMB8c3fQvx3k9OHY5t71gBxGNOXemrc,2778 +hjson/tests/test_hjson.py,sha256=CdvXR05nu8bF_jZ-Hhj3bh8LRi8tdSJTruayj69HoDk,2327 +hjson/tests/test_indent.py,sha256=8oUK5E8DTz1c3RkUU-nOELmr9wOKoaHHOAsxDai66iE,2589 +hjson/tests/test_item_sort_key.py,sha256=piYy-ntwdcb_qS-y8jPFI6rVZlHCNqtTFGnaZSEvWH8,1134 +hjson/tests/test_namedtuple.py,sha256=iK7B95JH4f2L3_MB3rY9NagEVZ1X62JHpjlm1J4t5uM,4066 +hjson/tests/test_pass1.py,sha256=wdnBz55dY4ou8IIdZFypJ_72J6HCtLZw0YesoNOTopQ,1745 +hjson/tests/test_pass2.py,sha256=O389C8IeJ9ysqWKUftXOeHvBi5_47VSdBM_2bCee1SQ,385 +hjson/tests/test_pass3.py,sha256=wwemRholrfv4lwgC0ArnXiS_tfA-NnbRrZqo71YZXaA,481 +hjson/tests/test_recursion.py,sha256=APQAtnchO6KZdduYqvEJDrBOmAKqyORwAX4ldbzAW7A,1694 +hjson/tests/test_scanstring.py,sha256=U-5gUsaO7jEai6zfKMKADfBi-xFg8IHC1dBeElvTsn4,5953 +hjson/tests/test_separators.py,sha256=krudtyZbx0k7OrZZ-jGCZH6yrmsj1seRkUblQLRq4Rw,945 +hjson/tests/test_tool.py,sha256=PyDhrmMTpJSvBXvsWG8P-yrw1BT5kSp6OcSrMLvzmIw,2826 +hjson/tests/test_tuple.py,sha256=FkOyw4s7WZ1HUGxtHYiwaBwhW2thlDB3JFSpUZCbI_I,1976 +hjson/tests/test_unicode.py,sha256=5npz25rj4T3ZhXF_OsSmVveBUtTuz92DQ_cHDe-Jfrw,7099 +hjson/tool.py,sha256=PVlr-lSDWfrGdkh5exWQo_21HGgHLR4OpZ1S41Dqjgo,1897 diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. 
+ + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important Notice: Read before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +(“Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. 
Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. 
While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. 
This SDK may include software and materials from
+     NVIDIA’s licensors, and these licensors are intended
+     third party beneficiaries that may enforce this Agreement
+     with respect to their intellectual property rights.
+
+  2. You hold all rights, title and interest in and to your
+     applications and your derivative works of the sample
+     source code delivered in the SDK, including their
+     respective intellectual property rights, subject to
+     NVIDIA’s rights described in this section.
+
+  3. You may, but don’t have to, provide to NVIDIA
+     suggestions, feature requests or other feedback regarding
+     the SDK, including possible enhancements or modifications
+     to the SDK. For any feedback that you voluntarily provide,
+     you hereby grant NVIDIA and its affiliates a perpetual,
+     non-exclusive, worldwide, irrevocable license to use,
+     reproduce, modify, license, sublicense (through multiple
+     tiers of sublicensees), and distribute (through multiple
+     tiers of distributors) it without the payment of any
+     royalties or fees to you. NVIDIA will use feedback at its
+     choice. NVIDIA is constantly looking for ways to improve
+     its products, so you may send feedback to NVIDIA through
+     the developer portal at https://developer.nvidia.com.
+
+
+1.4. No Warranties
+
+THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+DEALING OR COURSE OF TRADE.
+
+
+1.5. Limitation of Liability
+
+TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES’
+TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+LIMIT.
+
+These exclusions and limitations of liability shall apply
+regardless of whether NVIDIA or its affiliates have been
+advised of the possibility of such damages, and regardless of
+whether a remedy fails its essential purpose. These exclusions
+and limitations of liability form an essential basis of the
+bargain between the parties, and, absent any of these
+exclusions or limitations of liability, the provisions of this
+Agreement, including, without limitation, the economic terms,
+would be substantially different.
+
+
+1.6. Termination
+
+  1. This Agreement will continue to apply until terminated by
+     either you or NVIDIA as described below.
+
+  2. If you want to terminate this Agreement, you may do so by
+     ceasing to use the SDK.
+
+  3. NVIDIA may, at any time, terminate this Agreement if:
+
+      a. you fail to comply with any term of this
+         Agreement and the non-compliance is not fixed within
+         thirty (30) days following notice from NVIDIA (or
+         immediately if you violate NVIDIA’s intellectual
+         property rights);
+
+      b. you commence or participate in any legal
+         proceeding against NVIDIA with respect to the SDK; or
+
+      c. NVIDIA decides to no longer provide the SDK in
+         a country or, in NVIDIA’s sole discretion, the
+         continued use of it is no longer commercially viable.
+
+  4. Upon any termination of this Agreement, you agree to
+     promptly discontinue use of the SDK and destroy all copies
+     in your possession or control. Your prior distributions in
+     accordance with this Agreement are not affected by the
+     termination of this Agreement. Upon written request, you
+     will certify in writing that you have complied with your
+     commitments under this section. Upon any termination of
+     this Agreement all provisions survive except for the
+     license grant provisions.
+
+
+1.7. General
+
+If you wish to assign this Agreement or your rights and
+obligations, including by merger, consolidation, dissolution
+or operation of law, contact NVIDIA to ask for permission. Any
+attempted assignment not approved by NVIDIA in writing shall
+be void and of no effect. NVIDIA may assign, delegate or
+transfer this Agreement and its rights and obligations, and if
+to a non-affiliate you will be notified.
+
+You agree to cooperate with NVIDIA and provide reasonably
+requested information to verify your compliance with this
+Agreement.
+
+This Agreement will be governed in all respects by the laws of
+the United States and of the State of Delaware as those laws
+are applied to contracts entered into and performed entirely
+within Delaware by Delaware residents, without regard to the
+conflicts of laws principles. The United Nations Convention on
+Contracts for the International Sale of Goods is specifically
+disclaimed. You agree to all terms of this Agreement in the
+English language.
+
+The state or federal courts residing in Santa Clara County,
+California shall have exclusive jurisdiction over any dispute
+or claim arising out of this Agreement. Notwithstanding this,
+you agree that NVIDIA shall still be allowed to apply for
+injunctive remedies or an equivalent type of urgent legal
+relief in any jurisdiction.
+
+If any court of competent jurisdiction determines that any
+provision of this Agreement is illegal, invalid or
+unenforceable, such provision will be construed as limited to
+the extent necessary to be consistent with and fully
+enforceable under the law and the remaining provisions will
+remain in full force and effect. Unless otherwise specified,
+remedies are cumulative.
+
+Each party acknowledges and agrees that the other is an
+independent contractor in the performance of this Agreement.
+
+The SDK has been developed entirely at private expense and is
+“commercial items” consisting of “commercial computer
+software” and “commercial computer software
+documentation” provided with RESTRICTED RIGHTS. Use,
+duplication or disclosure by the U.S. Government or a U.S.
+Government subcontractor is subject to the restrictions in
+this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+in subparagraphs (c)(1) and (2) of the Commercial Computer
+Software - Restricted Rights clause at FAR 52.227-19, as
+applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+Expressway, Santa Clara, CA 95051.
+
+The SDK is subject to United States export laws and
+regulations.
You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. 
NVIDIA does not grant to you
+under this Agreement any necessary patent or other rights with
+respect to any audio and/or video encoders and decoders.
+
+
+2.5. Licensing
+
+If the distribution terms in this Agreement are not suitable
+for your organization, or for any questions regarding this
+Agreement, please contact NVIDIA at
+nvidia-compute-license-questions@nvidia.com.
+
+
+2.6. Attachment A
+
+The following portions of the SDK are distributable under the
+Agreement:
+
+Component: CUDA Runtime
+  Windows:  cudart.dll, cudart_static.lib, cudadevrt.lib
+  Mac OSX:  libcudart.dylib, libcudart_static.a, libcudadevrt.a
+  Linux:    libcudart.so, libcudart_static.a, libcudadevrt.a
+  Android:  libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Component: CUDA FFT Library
+  Windows:  cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+  Mac OSX:  libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+            libcufftw_static.a
+  Linux:    libcufft.so, libcufft_static.a, libcufftw.so,
+            libcufftw_static.a
+  Android:  libcufft.so, libcufft_static.a, libcufftw.so,
+            libcufftw_static.a
+
+Component: CUDA BLAS Library
+  Windows:  cublas.dll, cublasLt.dll
+  Mac OSX:  libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+            libcublasLt_static.a
+  Linux:    libcublas.so, libcublasLt.so, libcublas_static.a,
+            libcublasLt_static.a
+  Android:  libcublas.so, libcublasLt.so, libcublas_static.a,
+            libcublasLt_static.a
+
+Component: NVIDIA "Drop-in" BLAS Library
+  Windows:  nvblas.dll
+  Mac OSX:  libnvblas.dylib
+  Linux:    libnvblas.so
+
+Component: CUDA Sparse Matrix Library
+  Windows:  cusparse.dll, cusparse.lib
+  Mac OSX:  libcusparse.dylib, libcusparse_static.a
+  Linux:    libcusparse.so, libcusparse_static.a
+  Android:  libcusparse.so, libcusparse_static.a
+
+Component: CUDA Linear Solver Library
+  Windows:  cusolver.dll, cusolver.lib
+  Mac OSX:  libcusolver.dylib, libcusolver_static.a
+  Linux:    libcusolver.so, libcusolver_static.a
+  Android:  libcusolver.so, libcusolver_static.a
+
+Component: CUDA Random Number Generation Library
+  Windows:  curand.dll, curand.lib
+  Mac OSX:  libcurand.dylib, libcurand_static.a
+  Linux:    libcurand.so, libcurand_static.a
+  Android:  libcurand.so, libcurand_static.a
+
+Component: CUDA Accelerated Graph Library
+
+Component: NVIDIA Performance Primitives Library
+  Windows:  nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+            nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+            nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+            nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+            nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+  Mac OSX:  libnppc.dylib, libnppc_static.a, libnppial.dylib,
+            libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+            libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+            libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+            libnppig.dylib, libnppig_static.a, libnppim.dylib,
+            libnppim_static.a, libnppist.dylib, libnppist_static.a,
+            libnppisu.dylib, libnppisu_static.a, libnppitc.dylib,
+            libnppitc_static.a, libnpps.dylib, libnpps_static.a
+  Linux:    libnppc.so, libnppc_static.a, libnppial.so,
+            libnppial_static.a, libnppicc.so, libnppicc_static.a,
+            libnppicom.so, libnppicom_static.a, libnppidei.so,
+            libnppidei_static.a, libnppif.so, libnppif_static.a,
+            libnppig.so, libnppig_static.a, libnppim.so,
+            libnppim_static.a, libnppist.so, libnppist_static.a,
+            libnppisu.so, libnppisu_static.a, libnppitc.so,
+            libnppitc_static.a, libnpps.so, libnpps_static.a
+  Android:  libnppc.so, libnppc_static.a, libnppial.so,
+            libnppial_static.a, libnppicc.so, libnppicc_static.a,
+            libnppicom.so, libnppicom_static.a, libnppidei.so,
+            libnppidei_static.a, libnppif.so, libnppif_static.a,
+            libnppig.so, libnppig_static.a, libnppim.so,
+            libnppim_static.a, libnppist.so, libnppist_static.a,
+            libnppisu.so, libnppisu_static.a, libnppitc.so,
+            libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component: NVIDIA JPEG Library
+  Linux:    libnvjpeg.so, libnvjpeg_static.a
+
+Component: Internal common library required for statically linking
+           to cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+  Mac OSX:  libculibos.a
+  Linux:    libculibos.a
+
+Component: NVIDIA Runtime Compilation Library and Header
+  All:      nvrtc.h
+  Windows:  nvrtc.dll, nvrtc-builtins.dll
+  Mac OSX:  libnvrtc.dylib, libnvrtc-builtins.dylib
+  Linux:    libnvrtc.so, libnvrtc-builtins.so
+
+Component: NVIDIA Optimizing Compiler Library
+  Windows:  nvvm.dll
+  Mac OSX:  libnvvm.dylib
+  Linux:    libnvvm.so
+
+Component: NVIDIA Common Device Math Functions Library
+  Windows:  libdevice.10.bc
+  Mac OSX:  libdevice.10.bc
+  Linux:    libdevice.10.bc
+
+Component: CUDA Occupancy Calculation Header Library
+  All:      cuda_occupancy.h
+
+Component: CUDA Half Precision Headers
+  All:      cuda_fp16.h, cuda_fp16.hpp
+
+Component: CUDA Profiling Tools Interface (CUPTI) Library
+  Windows:  cupti.dll
+  Mac OSX:  libcupti.dylib
+  Linux:    libcupti.so
+
+Component: NVIDIA Tools Extension Library
+  Windows:  nvToolsExt.dll, nvToolsExt.lib
+  Mac OSX:  libnvToolsExt.dylib
+  Linux:    libnvToolsExt.so
+
+Component: NVIDIA CUDA Driver Libraries
+  Linux:    libcuda.so, libnvidia-fatbinaryloader.so,
+            libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet these criteria:
+
+  1. The application was developed starting from a NVIDIA CUDA
+     container obtained from Docker Hub or the NVIDIA GPU
+     Cloud, and
+
+  2. The resulting application is packaged as a Docker
+     container and distributed to users on Docker Hub or the
+     NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+  1. Licensee's use of the GDB third party component is
+     subject to the terms and conditions of GNU GPL v3:
+
+    This product includes copyrighted third-party software licensed
+    under the terms of the GNU General Public License v3 ("GPL v3").
+    All third-party software packages are copyright by their respective
+    authors. GPL v3 terms and conditions are hereby incorporated into
+    the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+    Consistent with these licensing requirements, the software
+    listed below is provided under the terms of the specified
+    open source software licenses. To obtain source code for
+    software provided under licenses that require
+    redistribution of source code, including the GNU General
+    Public License (GPL) and GNU Lesser General Public License
+    (LGPL), contact oss-requests@nvidia.com. This offer is
+    valid for a period of three (3) years from the date of the
+    distribution of this product by NVIDIA CORPORATION.
+
+    Component    License
+    CUDA-GDB     GPL v3
+
+  2. Licensee represents and warrants that any and all third
+     party licensing and/or royalty payment obligations in
+     connection with Licensee's use of the H.264 video codecs
+     are solely the responsibility of Licensee.
+
+  3.
Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. 
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+  THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  OTHER DEALINGS WITH THE SOFTWARE.
+
+  5. Licensee's use of the PCRE third party component (e.g. by
+     nvprof) is subject to the following terms and
+     conditions:
+
+    ------------
+    PCRE LICENCE
+    ------------
+    PCRE is a library of functions to support regular expressions whose syntax
+    and semantics are as close as possible to those of the Perl 5 language.
+    Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+    specified below. The documentation for PCRE, supplied in the "doc"
+    directory, is distributed under the same terms as the software itself. The
+    basic library functions are written in C and are freestanding. Also
+    included in the distribution is a set of C++ wrapper functions, and a just-
+    in-time compiler that can be used to optimize pattern matching. These are
+    both optional features that can be omitted when the library is built.
+
+    THE BASIC LIBRARY FUNCTIONS
+    ---------------------------
+    Written by:       Philip Hazel
+    Email local part: ph10
+    Email domain:     cam.ac.uk
+    University of Cambridge Computing Service,
+    Cambridge, England.
+    Copyright (c) 1997-2012 University of Cambridge
+    All rights reserved.
+
+    PCRE JUST-IN-TIME COMPILATION SUPPORT
+    -------------------------------------
+    Written by:       Zoltan Herczeg
+    Email local part: hzmester
+    Email domain:     freemail.hu
+    Copyright(c) 2010-2012 Zoltan Herczeg
+    All rights reserved.
+
+    STACK-LESS JUST-IN-TIME COMPILER
+    --------------------------------
+    Written by:       Zoltan Herczeg
+    Email local part: hzmester
+    Email domain:     freemail.hu
+    Copyright(c) 2009-2012 Zoltan Herczeg
+    All rights reserved.
+
+    THE C++ WRAPPER FUNCTIONS
+    -------------------------
+    Contributed by:   Google Inc.
+    Copyright (c) 2007-2012, Google Inc.
+    All rights reserved.
+
+    THE "BSD" LICENCE
+    -----------------
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+    * Neither the name of the University of Cambridge nor the name of Google
+      Inc. nor the names of their contributors may be used to endorse or
+      promote products derived from this software without specific prior
+      written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+    ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. 
Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  16. The NPP library uses code from the Boost Math Toolkit,
+      and is subject to the following license:
+
+    Boost Software License - Version 1.0 - August 17th, 2003
+    . . . .
+
+    Permission is hereby granted, free of charge, to any person or
+    organization obtaining a copy of the software and accompanying
+    documentation covered by this license (the "Software") to use,
+    reproduce, display, distribute, execute, and transmit the Software,
+    and to prepare derivative works of the Software, and to permit
+    third-parties to whom the Software is furnished to do so, all
+    subject to the following:
+
+    The copyright notices in the Software and this entire statement,
+    including the above license grant, this restriction and the following
+    disclaimer, must be included in all copies of the Software, in whole
+    or in part, and all derivative works of the Software, unless such
+    copies or derivative works are solely in the form of machine-executable
+    object code generated by a source language processor.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+    NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+    ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+    OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+    OTHER DEALINGS IN THE SOFTWARE.
+
+  17. Portions of the Nsight Eclipse Edition are subject to the
+      following license:
+
+    The Eclipse Foundation makes available all content in this plug-in
+    ("Content"). Unless otherwise indicated below, the Content is provided
+    to you under the terms and conditions of the Eclipse Public License
+    Version 1.0 ("EPL"). A copy of the EPL is available at
+    http://www.eclipse.org/legal/epl-v10.html. For purposes of the EPL,
+    "Program" will mean the Content.
+
+    If you did not receive this Content directly from the Eclipse
+    Foundation, the Content is being redistributed by another party
+    ("Redistributor") and different terms and conditions may apply to your
+    use of any object code in the Content. Check the Redistributor's
+    license that was provided with the Content. If no such license exists,
+    contact the Redistributor. Unless otherwise indicated below, the terms
+    and conditions of the EPL still apply to any source code in the
+    Content and such source code may be obtained at http://www.eclipse.org.
+
+  18. Some of the cuBLAS library routines use code from
+      OpenAI, which is subject to the following license:
+
+    License URL
+    https://github.com/openai/openai-gemm/blob/master/LICENSE
+
+    License Text
+    The MIT License
+
+    Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+    THE SOFTWARE.
+
+  19. Licensee's use of the Visual Studio Setup Configuration
+      Samples is subject to the following license:
+
+    The MIT License (MIT)
+    Copyright (C) Microsoft Corporation. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person
+    obtaining a copy of this software and associated documentation
+    files (the "Software"), to deal in the Software without restriction,
+    including without limitation the rights to use, copy, modify, merge,
+    publish, distribute, sublicense, and/or sell copies of the Software,
+    and to permit persons to whom the Software is furnished to do so,
+    subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included
+    in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+    THE SOFTWARE.
+
+  20. Licensee's use of the linmath.h header for CPU functions for
+      GL vector/matrix operations from LunarG is subject to the
+      Apache License Version 2.0.
+
+  21. The DX12-CUDA sample uses the d3dx12.h header, which is
+      subject to the MIT license.
+ +----------------- diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..aabd4f7520077f5f9c18bae61ce8ab5754bc57fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA @@ -0,0 +1,36 @@ +Metadata-Version: 2.1 +Name: nvidia-cusparse-cu12 +Version: 12.1.0.106 +Summary: CUSPARSE native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-nvjitlink-cu12 + +CUSPARSE native runtime libraries diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2cf8818e2b166fe605908388de295e1815bc92eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD @@ -0,0 +1,17 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/cusparse.h,sha256=yhV9iTcEW9XEyhaJmX4iddh_cMb8sfNAy6qva5ae4qw,287290 +nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587 +nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/lib/libcusparse.so.12,sha256=UARmovVZ3mIqcbuSDT0pI-aRNSRXR6J0LuE-3_C6YIU,264876688 +nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 
+nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA,sha256=XpBtE4L1lFCx7gDu7Klx9dijNWQW26PS3fcOGjNIsXg,1550 +nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD,, +nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/venv/lib/python3.10/site-packages/pandas/__init__.py b/venv/lib/python3.10/site-packages/pandas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca2eba20432924304517be99d5113bc9f57614d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/__init__.py @@ -0,0 +1,367 @@ +from __future__ import annotations + +import os +import warnings + +__docformat__ = "restructuredtext" + +# Let users know if they're missing any of our hard dependencies +_hard_dependencies = ("numpy", "pytz", "dateutil") +_missing_dependencies = [] + +for _dependency in _hard_dependencies: + try: + __import__(_dependency) + except ImportError as _e: # pragma: no cover + _missing_dependencies.append(f"{_dependency}: {_e}") + +if _missing_dependencies: # pragma: no cover + raise ImportError( + "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies) + ) +del _hard_dependencies, _dependency, _missing_dependencies + +try: + # numpy compat + from pandas.compat import ( + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401 + ) +except ImportError as _err: # pragma: no cover + _module = _err.name + raise ImportError( + f"C extension: {_module} not built. If you want to import " + "pandas from the source directory, you may need to run " + "'python setup.py build_ext' to build the C extensions first." 
+ ) from _err + +from pandas._config import ( + get_option, + set_option, + reset_option, + describe_option, + option_context, + options, +) + +# let init-time option registration happen +import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401 + +from pandas.core.api import ( + # dtype + ArrowDtype, + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + CategoricalDtype, + PeriodDtype, + IntervalDtype, + DatetimeTZDtype, + StringDtype, + BooleanDtype, + # missing + NA, + isna, + isnull, + notna, + notnull, + # indexes + Index, + CategoricalIndex, + RangeIndex, + MultiIndex, + IntervalIndex, + TimedeltaIndex, + DatetimeIndex, + PeriodIndex, + IndexSlice, + # tseries + NaT, + Period, + period_range, + Timedelta, + timedelta_range, + Timestamp, + date_range, + bdate_range, + Interval, + interval_range, + DateOffset, + # conversion + to_numeric, + to_datetime, + to_timedelta, + # misc + Flags, + Grouper, + factorize, + unique, + value_counts, + NamedAgg, + array, + Categorical, + set_eng_float_format, + Series, + DataFrame, +) + +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.tseries.api import infer_freq +from pandas.tseries import offsets + +from pandas.core.computation.api import eval + +from pandas.core.reshape.api import ( + concat, + lreshape, + melt, + wide_to_long, + merge, + merge_asof, + merge_ordered, + crosstab, + pivot, + pivot_table, + get_dummies, + from_dummies, + cut, + qcut, +) + +from pandas import api, arrays, errors, io, plotting, tseries +from pandas import testing +from pandas.util._print_versions import show_versions + +from pandas.io.api import ( + # excel + ExcelFile, + ExcelWriter, + read_excel, + # parsers + read_csv, + read_fwf, + read_table, + # pickle + read_pickle, + to_pickle, + # pytables + HDFStore, + read_hdf, + # sql + read_sql, + read_sql_query, + read_sql_table, + # misc + read_clipboard, + read_parquet, + read_orc, + read_feather, + read_gbq, + read_html, + read_xml, + read_json, + read_stata, + read_sas, + read_spss, +) + +from pandas.io.json._normalize import json_normalize + +from pandas.util._tester import test + +# use the closest tagged version if possible +_built_with_meson = False +try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __version__, + __git_version__, + ) + + _built_with_meson = True +except ImportError: + from pandas._version import get_versions + + v = get_versions() + __version__ = v.get("closest-tag", v["version"]) + __git_version__ = v.get("full-revisionid") + del get_versions, v + +# GH#55043 - deprecation of the data_manager option +if "PANDAS_DATA_MANAGER" in os.environ: + warnings.warn( + "The env variable PANDAS_DATA_MANAGER is set. The data_manager option is " + "deprecated and will be removed in a future version. Only the BlockManager " + "will be available. Unset this environment variable to silence this warning.", + FutureWarning, + stacklevel=2, + ) + +del warnings, os + +# module level doc-string +__doc__ = """ +pandas - a powerful data analysis and manipulation library for Python +===================================================================== + +**pandas** is a Python package providing fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. 
Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way toward this goal. + +Main Features +------------- +Here are just a few of the things that pandas does well: + + - Easy handling of missing data in floating point as well as non-floating + point data. + - Size mutability: columns can be inserted and deleted from DataFrame and + higher dimensional objects + - Automatic and explicit data alignment: objects can be explicitly aligned + to a set of labels, or the user can simply ignore the labels and let + `Series`, `DataFrame`, etc. automatically align the data for you in + computations. + - Powerful, flexible group by functionality to perform split-apply-combine + operations on data sets, for both aggregating and transforming data. + - Make it easy to convert ragged, differently-indexed data in other Python + and NumPy data structures into DataFrame objects. + - Intelligent label-based slicing, fancy indexing, and subsetting of large + data sets. + - Intuitive merging and joining data sets. + - Flexible reshaping and pivoting of data sets. + - Hierarchical labeling of axes (possible to have multiple labels per tick). + - Robust IO tools for loading data from flat files (CSV and delimited), + Excel files, databases, and saving/loading data from the ultrafast HDF5 + format. + - Time series-specific functionality: date range generation and frequency + conversion, moving window statistics, date shifting and lagging. +""" + +# Use __all__ to let type checkers know what is part of the public API. +# Pandas is not (yet) a py.typed library: the public API is determined +# based on the documentation. +__all__ = [ + "ArrowDtype", + "BooleanDtype", + "Categorical", + "CategoricalDtype", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "DatetimeTZDtype", + "ExcelFile", + "ExcelWriter", + "Flags", + "Float32Dtype", + "Float64Dtype", + "Grouper", + "HDFStore", + "Index", + "IndexSlice", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "Int8Dtype", + "Interval", + "IntervalDtype", + "IntervalIndex", + "MultiIndex", + "NA", + "NaT", + "NamedAgg", + "Period", + "PeriodDtype", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "UInt8Dtype", + "api", + "array", + "arrays", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "describe_option", + "errors", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "get_option", + "infer_freq", + "interval_range", + "io", + "isna", + "isnull", + "json_normalize", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "notna", + "notnull", + "offsets", + "option_context", + "options", + "period_range", + "pivot", + "pivot_table", + "plotting", + "qcut", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "reset_option", + "set_eng_float_format", + "set_option", + "show_versions", + "test", + "testing", + "timedelta_range", + "to_datetime", + "to_numeric", + "to_pickle", + "to_timedelta", + "tseries", + "unique", + "value_counts", + "wide_to_long", +] diff --git 
a/venv/lib/python3.10/site-packages/pandas/_typing.py b/venv/lib/python3.10/site-packages/pandas/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..3df9a47a35fca32547947560a8df1cea1d1863c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_typing.py @@ -0,0 +1,525 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + MutableMapping, + Sequence, +) +from datetime import ( + date, + datetime, + timedelta, + tzinfo, +) +from os import PathLike +import sys +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Protocol, + Type as type_t, + TypeVar, + Union, + overload, +) + +import numpy as np + +# To prevent import cycles place any internal imports in the branch below +# and use a string literal forward reference to it in subsequent types +# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles +if TYPE_CHECKING: + import numpy.typing as npt + + from pandas._libs import ( + NaTType, + Period, + Timedelta, + Timestamp, + ) + from pandas._libs.tslibs import BaseOffset + + from pandas.core.dtypes.dtypes import ExtensionDtype + + from pandas import Interval + from pandas.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + from pandas.core.frame import DataFrame + from pandas.core.generic import NDFrame + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + GroupBy, + SeriesGroupBy, + ) + from pandas.core.indexes.base import Index + from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, + SingleBlockManager, + ) + from pandas.core.resample import Resampler + from pandas.core.series import Series + from pandas.core.window.rolling import BaseWindow + + from pandas.io.formats.format import EngFormatter + from pandas.tseries.holiday import AbstractHolidayCalendar + + ScalarLike_co = Union[ + int, + float, + complex, + str, + bytes, + np.generic, + ] + + # numpy compatible types + NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike] + # Name "npt._ArrayLikeInt_co" is not defined [name-defined] + NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined] + + from typing import SupportsIndex + + if sys.version_info >= (3, 10): + from typing import TypeGuard # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import TypeGuard # pyright: ignore[reportUnusedImport] + + if sys.version_info >= (3, 11): + from typing import Self # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import Self # pyright: ignore[reportUnusedImport] +else: + npt: Any = None + Self: Any = None + TypeGuard: Any = None + +HashableT = TypeVar("HashableT", bound=Hashable) +MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping) + +# array-like + +ArrayLike = Union["ExtensionArray", np.ndarray] +AnyArrayLike = Union[ArrayLike, "Index", "Series"] +TimeArrayLike = Union["DatetimeArray", "TimedeltaArray"] + +# list-like + +# from https://github.com/hauntsaninja/useful_types +# includes Sequence-like objects but excludes str and bytes +_T_co = TypeVar("_T_co", covariant=True) + + +class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: + ... + + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: + ... + + def __contains__(self, value: object, /) -> bool: + ... + + def __len__(self) -> int: + ... + + def __iter__(self) -> Iterator[_T_co]: + ... 
+ + def index(self, value: Any, /, start: int = 0, stop: int = ...) -> int: + ... + + def count(self, value: Any, /) -> int: + ... + + def __reversed__(self) -> Iterator[_T_co]: + ... + + +ListLike = Union[AnyArrayLike, SequenceNotStr, range] + +# scalars + +PythonScalar = Union[str, float, bool] +DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] +PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] +Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] +IntStrT = TypeVar("IntStrT", bound=Union[int, str]) + + +# timestamp and timedelta convertible types + +TimestampConvertibleTypes = Union[ + "Timestamp", date, np.datetime64, np.int64, float, str +] +TimestampNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +TimedeltaConvertibleTypes = Union[ + "Timedelta", timedelta, np.timedelta64, np.int64, float, str +] +Timezone = Union[str, tzinfo] + +ToTimestampHow = Literal["s", "e", "start", "end"] + +# NDFrameT is stricter and ensures that the same subclass of NDFrame always is +# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a +# Series is passed into a function, a Series is always returned and if a DataFrame is +# passed in, a DataFrame is always returned. +NDFrameT = TypeVar("NDFrameT", bound="NDFrame") + +NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") + +AxisInt = int +Axis = Union[AxisInt, Literal["index", "columns", "rows"]] +IndexLabel = Union[Hashable, Sequence[Hashable]] +Level = Hashable +Shape = tuple[int, ...] +Suffixes = tuple[Optional[str], Optional[str]] +Ordered = Optional[bool] +JSONSerializable = Optional[Union[PythonScalar, list, dict]] +Frequency = Union[str, "BaseOffset"] +Axes = ListLike + +RandomState = Union[ + int, + np.ndarray, + np.random.Generator, + np.random.BitGenerator, + np.random.RandomState, +] + +# dtypes +NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]] +Dtype = Union["ExtensionDtype", NpDtype] +AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"] +# DtypeArg specifies all allowable dtypes in a function's dtype argument +DtypeArg = Union[Dtype, dict[Hashable, Dtype]] +DtypeObj = Union[np.dtype, "ExtensionDtype"] + +# converters +ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]] + +# parse_dates +ParseDatesArg = Union[ + bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]] +] + +# For functions like rename that convert one label to another +Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]] + +# to maintain type information across generic functions and parametrization +T = TypeVar("T") + +# used in decorators to preserve the signature of the function it decorates +# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators +FuncType = Callable[..., Any] +F = TypeVar("F", bound=FuncType) + +# types of vectorized key functions for DataFrame::sort_values and +# DataFrame::sort_index, among others +ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]] +IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncTypeDict = MutableMapping[ + Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]] +] +AggFuncType = Union[ + AggFuncTypeBase, + list[AggFuncTypeBase], + AggFuncTypeDict, +] +AggObjType = Union[ + "Series", + "DataFrame", + "GroupBy", + "SeriesGroupBy", + "DataFrameGroupBy", +
"BaseWindow", + "Resampler", +] + +PythonFuncType = Callable[[Any], Any] + +# filenames and file-like-objects +AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True) +AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True) + + +class BaseBuffer(Protocol): + @property + def mode(self) -> str: + # for _get_filepath_or_buffer + ... + + def seek(self, __offset: int, __whence: int = ...) -> int: + # with one argument: gzip.GzipFile, bz2.BZ2File + # with two arguments: zip.ZipFile, read_sas + ... + + def seekable(self) -> bool: + # for bz2.BZ2File + ... + + def tell(self) -> int: + # for zip.ZipFile, read_stata, to_stata + ... + + +class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): + def read(self, __n: int = ...) -> AnyStr_co: + # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File + ... + + +class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): + def write(self, __b: AnyStr_contra) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + def flush(self) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + +class ReadPickleBuffer(ReadBuffer[bytes], Protocol): + def readline(self) -> bytes: + ... + + +class WriteExcelBuffer(WriteBuffer[bytes], Protocol): + def truncate(self, size: int | None = ...) -> int: + ... + + +class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol): + def __iter__(self) -> Iterator[AnyStr_co]: + # for engine=python + ... + + def fileno(self) -> int: + # for _MMapWrapper + ... + + def readline(self) -> AnyStr_co: + # for engine=python + ... + + @property + def closed(self) -> bool: + # for enine=pyarrow + ... + + +FilePath = Union[str, "PathLike[str]"] + +# for arbitrary kwargs passed during reading/writing files +StorageOptions = Optional[dict[str, Any]] + + +# compression keywords and compression +CompressionDict = dict[str, Any] +CompressionOptions = Optional[ + Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] +] + +# types in DataFrameFormatter +FormattersType = Union[ + list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable] +] +ColspaceType = Mapping[Hashable, Union[str, int]] +FloatFormatType = Union[str, Callable, "EngFormatter"] +ColspaceArgType = Union[ + str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]] +] + +# Arguments for fillna() +FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"] +InterpolateOptions = Literal[ + "linear", + "time", + "index", + "values", + "nearest", + "zero", + "slinear", + "quadratic", + "cubic", + "barycentric", + "polynomial", + "krogh", + "piecewise_polynomial", + "spline", + "pchip", + "akima", + "cubicspline", + "from_derivatives", +] + +# internals +Manager = Union[ + "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" +] +SingleManager = Union["SingleArrayManager", "SingleBlockManager"] +Manager2D = Union["ArrayManager", "BlockManager"] + +# indexing +# PositionalIndexer -> valid 1D positional indexer, e.g. can pass +# to ndarray.__getitem__ +# ScalarIndexer is for a single value as the index +# SequenceIndexer is for list like or slices (but not tuples) +# PositionalIndexerTuple is extends the PositionalIndexer for 2D arrays +# These are used in various __getitem__ overloads +# TODO(typing#684): add Ellipsis, see +# https://github.com/python/typing/issues/684#issuecomment-548203158 +# https://bugs.python.org/issue41810 +# Using List[int] here rather than Sequence[int] to disallow tuples. 
+ScalarIndexer = Union[int, np.integer] +SequenceIndexer = Union[slice, list[int], np.ndarray] +PositionalIndexer = Union[ScalarIndexer, SequenceIndexer] +PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer] +PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple] +if TYPE_CHECKING: + TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]] +else: + TakeIndexer = Any + +# Shared by functions such as drop and astype +IgnoreRaise = Literal["ignore", "raise"] + +# Windowing rank methods +WindowingRankType = Literal["average", "min", "max"] + +# read_csv engines +CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"] + +# read_json engines +JSONEngine = Literal["ujson", "pyarrow"] + +# read_xml parsers +XMLParsers = Literal["lxml", "etree"] + +# read_html flavors +HTMLFlavors = Literal["lxml", "html5lib", "bs4"] + +# Interval closed type +IntervalLeftRight = Literal["left", "right"] +IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] + +# datetime and NaTType +DatetimeNaTType = Union[datetime, "NaTType"] +DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] + +# sort_index +SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] +NaPosition = Literal["first", "last"] + +# Arguments for nsmallest and nlargest +NsmallestNlargestKeep = Literal["first", "last", "all"] + +# quantile interpolation +QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"] + +# plotting +PlottingOrientation = Literal["horizontal", "vertical"] + +# dropna +AnyAll = Literal["any", "all"] + +# merge +MergeHow = Literal["left", "right", "inner", "outer", "cross"] +MergeValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + "many_to_many", + "m:m", +] + +# join +JoinHow = Literal["left", "right", "inner", "outer"] +JoinValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + "many_to_many", + "m:m", +] + +# reindex +ReindexMethod = Union[FillnaOptions, Literal["nearest"]] + +MatplotlibColor = Union[str, Sequence[float]] +TimeGrouperOrigin = Union[ + "Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"] +] +TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"] +TimeNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +DropKeep = Literal["first", "last", False] +CorrelationMethod = Union[ + Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float] +] +AlignJoin = Literal["outer", "inner", "left", "right"] +DtypeBackend = Literal["pyarrow", "numpy_nullable"] + +TimeUnit = Literal["s", "ms", "us", "ns"] +OpenFileErrors = Literal[ + "strict", + "ignore", + "replace", + "surrogateescape", + "xmlcharrefreplace", + "backslashreplace", + "namereplace", +] + +# update +UpdateJoin = Literal["left"] + +# applymap +NaAction = Literal["ignore"] + +# from_dict +FromDictOrient = Literal["columns", "index", "tight"] + +# to_gbq +ToGbqIfexist = Literal["fail", "replace", "append"] + +# to_stata +ToStataByteorder = Literal[">", "<", "little", "big"] + +# ExcelWriter +ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"] + +# Offsets +OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"] + +# read_csv: usecols +UsecolsArgType = Union[ + SequenceNotStr[Hashable], + range, + AnyArrayLike, + Callable[[HashableT], bool], + None, +] diff --git 
a/venv/lib/python3.10/site-packages/pandas/_version.py b/venv/lib/python3.10/site-packages/pandas/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a960630126d021d86f685a160b98cb5eada197 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_version.py @@ -0,0 +1,692 @@ +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by github's download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. +# Generated by versioneer-0.28 +# https://github.com/python-versioneer/python-versioneer + +"""Git implementation of _version.py.""" + +import errno +import functools +import os +import re +import subprocess +import sys +from typing import Callable + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "pandas-" + cfg.versionfile_source = "pandas/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: dict[str, str] = {} +HANDLERS: dict[str, dict[str, Callable]] = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + dispcmd = str([command] + args) + try: + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen( + [command] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + **popen_kwargs, + ) + break + except OSError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print(f"unable to run {dispcmd}") + print(e) + return None, None + else: + if verbose: + print(f"unable to find command, tried {commands}") + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print(f"unable to 
run {dispcmd} (error)") + print(f"stdout was {stdout}") + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print( + f"Tried directories {str(rootdirs)} \ + but none started with prefix {parentdir_prefix}" + ) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + with open(versionfile_abs, encoding="utf-8") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. 
By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r"\d", r)} + if verbose: + print(f"discarding '{','.join(refs - tags)}', no digits") + if verbose: + print(f"likely tags: {','.join(sorted(tags))}") + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix) :] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r"\d", r): + continue + if verbose: + print(f"picking {r}") + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) + if rc != 0: + if verbose: + print(f"Directory {root} not under git control") + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + f"{tag_prefix}[[:digit:]]*", + ], + cwd=root, + ) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. 
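+        # Sample `git branch --contains` output as handled below
+        # (illustrative, hypothetical hashes and branch names):
+        #   * (HEAD detached at 1a2b3c4)
+        #     master
+        #     feature-x
+        # The parenthesized detached-HEAD line is popped, then the two-column
+        # "* "/"  " prefix is sliced off each remaining name.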
+ branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[: git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces[ + "error" + ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix) :] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or None if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += f".post{post_version + 1}.dev{pieces['distance']}" + else: + rendered += f".post0.dev{pieces['distance']}" + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = f"0.post0.dev{pieces['distance']}" + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always --long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError(f"unknown style '{style}'") + + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
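+        # For example (illustrative): versionfile_source here is
+        # "pandas/_version.py", so the loop below applies os.path.dirname()
+        # twice, walking from .../pandas/_version.py up to the source root.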
+ for _ in cfg.versionfile_source.split("/"): + root = os.path.dirname(root) + except NameError: + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/venv/lib/python3.10/site-packages/pandas/_version_meson.py b/venv/lib/python3.10/site-packages/pandas/_version_meson.py new file mode 100644 index 0000000000000000000000000000000000000000..360ff90b89f68a2b2188f060b314ee2373df71de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_version_meson.py @@ -0,0 +1,2 @@ +__version__="2.2.2" +__git_version__="d9cdd2ee5a58015ef6f4d15c7226110c9aab8140" diff --git a/venv/lib/python3.10/site-packages/pandas/api/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d42b6541fdf8817b996ef9804db8a87a2bcd2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/__init__.py @@ -0,0 +1,16 @@ +""" public toolkit API """ +from pandas.api import ( + extensions, + indexers, + interchange, + types, + typing, +) + +__all__ = [ + "interchange", + "extensions", + "indexers", + "types", + "typing", +] diff --git a/venv/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd3840045f8f355d5e0117130a2c69d1d1ad8ae9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/api/extensions/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/extensions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5f1ba926899f9d11e34e70181ed77cae7ead1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/extensions/__init__.py @@ -0,0 +1,33 @@ +""" +Public API for extending pandas objects. 
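+
+For example, a custom accessor can be registered as follows (an illustrative
+sketch; the accessor name "geo" and the class body are hypothetical)::
+
+    @register_dataframe_accessor("geo")
+    class GeoAccessor:
+        def __init__(self, pandas_obj):
+            self._obj = pandas_obj
+
+        def center(self):
+            return self._obj[["x", "y"]].mean()
+
+after which it is available on any DataFrame as ``df.geo.center()``.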
+""" + +from pandas._libs.lib import no_default + +from pandas.core.dtypes.base import ( + ExtensionDtype, + register_extension_dtype, +) + +from pandas.core.accessor import ( + register_dataframe_accessor, + register_index_accessor, + register_series_accessor, +) +from pandas.core.algorithms import take +from pandas.core.arrays import ( + ExtensionArray, + ExtensionScalarOpsMixin, +) + +__all__ = [ + "no_default", + "ExtensionDtype", + "register_extension_dtype", + "register_dataframe_accessor", + "register_index_accessor", + "register_series_accessor", + "take", + "ExtensionArray", + "ExtensionScalarOpsMixin", +] diff --git a/venv/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3423dfbc1c3d25de05ab59e27d3c6f851013ce8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/api/indexers/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/indexers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..78357f11dc3b79f13490b91c69ef5457fbfa9768 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/indexers/__init__.py @@ -0,0 +1,17 @@ +""" +Public API for Rolling Window Indexers. +""" + +from pandas.core.indexers import check_array_indexer +from pandas.core.indexers.objects import ( + BaseIndexer, + FixedForwardWindowIndexer, + VariableOffsetWindowIndexer, +) + +__all__ = [ + "check_array_indexer", + "BaseIndexer", + "FixedForwardWindowIndexer", + "VariableOffsetWindowIndexer", +] diff --git a/venv/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2d9a94d681ea9cf373316a224c2ea06d0897f59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/api/interchange/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3a73bc46b3109c3c13e1a3468a69aed2ffb2e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/interchange/__init__.py @@ -0,0 +1,8 @@ +""" +Public API for DataFrame interchange protocol. 
+""" + +from pandas.core.interchange.dataframe_protocol import DataFrame +from pandas.core.interchange.from_dataframe import from_dataframe + +__all__ = ["from_dataframe", "DataFrame"] diff --git a/venv/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b707522a32f4f91176263c02c9ebe2e3fab191d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/api/types/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c601086bb9f86a633d6a3f2245779fea9428bf95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/types/__init__.py @@ -0,0 +1,23 @@ +""" +Public toolkit API. +""" + +from pandas._libs.lib import infer_dtype + +from pandas.core.dtypes.api import * # noqa: F403 +from pandas.core.dtypes.concat import union_categoricals +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) + +__all__ = [ + "infer_dtype", + "union_categoricals", + "CategoricalDtype", + "DatetimeTZDtype", + "IntervalDtype", + "PeriodDtype", +] diff --git a/venv/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..332ee3bae1c1276c903601075b353aa902a495d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/api/typing/__init__.py b/venv/lib/python3.10/site-packages/pandas/api/typing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b5d2cb06b523508b15025be804a66daaaaf7a45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/api/typing/__init__.py @@ -0,0 +1,55 @@ +""" +Public API classes that store intermediate results useful for type-hinting. 
+""" + +from pandas._libs import NaTType +from pandas._libs.missing import NAType + +from pandas.core.groupby import ( + DataFrameGroupBy, + SeriesGroupBy, +) +from pandas.core.resample import ( + DatetimeIndexResamplerGroupby, + PeriodIndexResamplerGroupby, + Resampler, + TimedeltaIndexResamplerGroupby, + TimeGrouper, +) +from pandas.core.window import ( + Expanding, + ExpandingGroupby, + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, + Rolling, + RollingGroupby, + Window, +) + +# TODO: Can't import Styler without importing jinja2 +# from pandas.io.formats.style import Styler +from pandas.io.json._json import JsonReader +from pandas.io.stata import StataReader + +__all__ = [ + "DataFrameGroupBy", + "DatetimeIndexResamplerGroupby", + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "JsonReader", + "NaTType", + "NAType", + "PeriodIndexResamplerGroupby", + "Resampler", + "Rolling", + "RollingGroupby", + "SeriesGroupBy", + "StataReader", + # See TODO above + # "Styler", + "TimedeltaIndexResamplerGroupby", + "TimeGrouper", + "Window", +] diff --git a/venv/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16fb3141dfd0dfa89cf9e929326e4be74ee491b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/conftest.py b/venv/lib/python3.10/site-packages/pandas/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..7c35dfdde90ba75d277c7817e97df5506a237fac --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/conftest.py @@ -0,0 +1,1965 @@ +""" +This file is very long and growing, but it was decided to not split it yet, as +it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989 + +Instead of splitting it was decided to define sections here: +- Configuration / Settings +- Autouse fixtures +- Common arguments +- Missing values & co. 
+- Classes +- Indices +- Series' +- DataFrames +- Operators & Operations +- Data sets/files +- Time zones +- Dtypes +- Misc +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, +) +from decimal import Decimal +import operator +import os +from typing import ( + TYPE_CHECKING, + Callable, +) + +from dateutil.tz import ( + tzlocal, + tzutc, +) +import hypothesis +from hypothesis import strategies as st +import numpy as np +import pytest +from pytz import ( + FixedOffset, + utc, +) + +from pandas._config.config import _get_option + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Interval, + IntervalIndex, + Period, + RangeIndex, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) +from pandas.util.version import Version + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +try: + import pyarrow as pa +except ImportError: + has_pyarrow = False +else: + del pa + has_pyarrow = True + +import zoneinfo + +try: + zoneinfo.ZoneInfo("UTC") +except zoneinfo.ZoneInfoNotFoundError: + zoneinfo = None # type: ignore[assignment] + + +# ---------------------------------------------------------------- +# Configuration / Settings +# ---------------------------------------------------------------- +# pytest + + +def pytest_addoption(parser) -> None: + parser.addoption( + "--no-strict-data-files", + action="store_false", + help="Don't fail if a test is skipped for missing data file.", + ) + + +def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None: + """Ignore doctest warning. + + Parameters + ---------- + item : pytest.Item + pytest test item. + path : str + Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A + warning will be filtered when item.name ends with the given path. So it is + sufficient to specify e.g. "DataFrame.append". + message : str + Message to be filtered. + """ + if item.name.endswith(path): + item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}")) + + +def pytest_collection_modifyitems(items, config) -> None: + is_doctest = config.getoption("--doctest-modules") or config.getoption( + "--doctest-cython", default=False + ) + + # Warnings from doctests that can be ignored; place reason in comment above. 
+ # Each entry specifies (path, message) - see the ignore_doctest_warning function + ignored_doctest_warnings = [ + ("is_int64_dtype", "is_int64_dtype is deprecated"), + ("is_interval_dtype", "is_interval_dtype is deprecated"), + ("is_period_dtype", "is_period_dtype is deprecated"), + ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"), + ("is_categorical_dtype", "is_categorical_dtype is deprecated"), + ("is_sparse", "is_sparse is deprecated"), + ("DataFrameGroupBy.fillna", "DataFrameGroupBy.fillna is deprecated"), + ("NDFrame.replace", "The 'method' keyword"), + ("NDFrame.replace", "Series.replace without 'value'"), + ("NDFrame.clip", "Downcasting behavior in Series and DataFrame methods"), + ("Series.idxmin", "The behavior of Series.idxmin"), + ("Series.idxmax", "The behavior of Series.idxmax"), + ("SeriesGroupBy.fillna", "SeriesGroupBy.fillna is deprecated"), + ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"), + ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"), + # Docstring divides by zero to show behavior difference + ("missing.mask_zero_div_zero", "divide by zero encountered"), + ( + "to_pydatetime", + "The behavior of DatetimeProperties.to_pydatetime is deprecated", + ), + ( + "pandas.core.generic.NDFrame.bool", + "(Series|DataFrame).bool is now deprecated and will be removed " + "in future version of pandas", + ), + ( + "pandas.core.generic.NDFrame.first", + "first is deprecated and will be removed in a future version. " + "Please create a mask and filter using `.loc` instead", + ), + ( + "Resampler.fillna", + "DatetimeIndexResampler.fillna is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrameGroupBy.fillna with 'method' is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrame.fillna with 'method' is deprecated", + ), + ("read_parquet", "Passing a BlockManager to DataFrame is deprecated"), + ] + + if is_doctest: + for item in items: + for path, message in ignored_doctest_warnings: + ignore_doctest_warning(item, path, message) + + +hypothesis_health_checks = [hypothesis.HealthCheck.too_slow] +if Version(hypothesis.__version__) >= Version("6.83.2"): + hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors) + +# Hypothesis +hypothesis.settings.register_profile( + "ci", + # Hypothesis timing checks are tuned for scalars by default, so we bump + # them from 200ms to 500ms per test case as the global default. If this + # is too short for a specific test, (a) try to make it faster, and (b) + # if it really is slow add `@settings(deadline=...)` with a working value, + # or `deadline=None` to entirely disable timeouts for that test. + # 2022-02-09: Changed deadline from 500 -> None. 
Deadline leads to + # non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969) + deadline=None, + suppress_health_check=tuple(hypothesis_health_checks), +) +hypothesis.settings.load_profile("ci") + +# Registering these strategies makes them globally available via st.from_type, +# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py +for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans()) + ) + +for name in "YearBegin YearEnd BYearBegin BYearEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-5, 5), + normalize=st.booleans(), + month=st.integers(min_value=1, max_value=12), + ), + ) + +for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-24, 24), + normalize=st.booleans(), + startingMonth=st.integers(min_value=1, max_value=12), + ), + ) + + +# ---------------------------------------------------------------- +# Autouse fixtures +# ---------------------------------------------------------------- + + +# https://github.com/pytest-dev/pytest/issues/11873 +# Would like to avoid autouse=True, but cannot as of pytest 8.0.0 +@pytest.fixture(autouse=True) +def add_doctest_imports(doctest_namespace) -> None: + """ + Make `np` and `pd` names available for doctests. + """ + doctest_namespace["np"] = np + doctest_namespace["pd"] = pd + + +@pytest.fixture(autouse=True) +def configure_tests() -> None: + """ + Configure settings for all tests and test modules. + """ + pd.set_option("chained_assignment", "raise") + + +# ---------------------------------------------------------------- +# Common arguments +# ---------------------------------------------------------------- +@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}") +def axis(request): + """ + Fixture for returning the axis numbers of a DataFrame. + """ + return request.param + + +axis_frame = axis + + +@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}") +def axis_1(request): + """ + Fixture for returning aliases of axis 1 of a DataFrame. + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def observed(request): + """ + Pass in the observed keyword to groupby for [True, False] + This indicates whether categoricals should return values for + values which are not in the grouper [False / None], or only values which + appear in the grouper [True]. [None] is supported for future compatibility + if we decide to change the default (and would need to warn if this + parameter is not passed). + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def ordered(request): + """ + Boolean 'ordered' parameter for Categorical. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def skipna(request): + """ + Boolean 'skipna' parameter. + """ + return request.param + + +@pytest.fixture(params=["first", "last", False]) +def keep(request): + """ + Valid values for the 'keep' parameter used in + .duplicated or .drop_duplicates + """ + return request.param + + +@pytest.fixture(params=["both", "neither", "left", "right"]) +def inclusive_endpoints_fixture(request): + """ + Fixture for trying all interval 'inclusive' parameters. 
+ """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def closed(request): + """ + Fixture for trying all interval closed parameters. + """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def other_closed(request): + """ + Secondary closed fixture to allow parametrizing over all pairs of closed. + """ + return request.param + + +@pytest.fixture( + params=[ + None, + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression(request): + """ + Fixture for trying common compression types in compression tests. + """ + return request.param + + +@pytest.fixture( + params=[ + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression_only(request): + """ + Fixture for trying common compression types in compression tests excluding + uncompressed case. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def writable(request): + """ + Fixture that an array is writable. + """ + return request.param + + +@pytest.fixture(params=["inner", "outer", "left", "right"]) +def join_type(request): + """ + Fixture for trying all types of join operations. + """ + return request.param + + +@pytest.fixture(params=["nlargest", "nsmallest"]) +def nselect_method(request): + """ + Fixture for trying all nselect methods. + """ + return request.param + + +# ---------------------------------------------------------------- +# Missing values & co. +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__) +def nulls_fixture(request): + """ + Fixture for each null type in pandas. + """ + return request.param + + +nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture + + +@pytest.fixture(params=[None, np.nan, pd.NaT]) +def unique_nulls_fixture(request): + """ + Fixture for each null type in pandas, each null type exactly once. + """ + return request.param + + +# Generate cartesian product of unique_nulls_fixture: +unique_nulls_fixture2 = unique_nulls_fixture + + +@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__) +def np_nat_fixture(request): + """ + Fixture for each NaT type in numpy. + """ + return request.param + + +# Generate cartesian product of np_nat_fixture: +np_nat_fixture2 = np_nat_fixture + + +# ---------------------------------------------------------------- +# Classes +# ---------------------------------------------------------------- + + +@pytest.fixture(params=[DataFrame, Series]) +def frame_or_series(request): + """ + Fixture to parametrize over DataFrame and Series. 
+    """
+    return request.param
+
+
+@pytest.fixture(params=[Index, Series], ids=["index", "series"])
+def index_or_series(request):
+    """
+    Fixture to parametrize over Index and Series, made necessary by a mypy
+    bug, giving an error:
+
+    List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
+
+    See GH#29725
+    """
+    return request.param
+
+
+# Generate cartesian product of index_or_series fixture:
+index_or_series2 = index_or_series
+
+
+@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"])
+def index_or_series_or_array(request):
+    """
+    Fixture to parametrize over Index, Series, and ExtensionArray
+    """
+    return request.param
+
+
+@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__)
+def box_with_array(request):
+    """
+    Fixture to test behavior for Index, Series, DataFrame, and pandas Array
+    classes
+    """
+    return request.param
+
+
+box_with_array2 = box_with_array
+
+
+@pytest.fixture
+def dict_subclass() -> type[dict]:
+    """
+    Fixture for a dictionary subclass.
+    """
+
+    class TestSubDict(dict):
+        def __init__(self, *args, **kwargs) -> None:
+            dict.__init__(self, *args, **kwargs)
+
+    return TestSubDict
+
+
+@pytest.fixture
+def non_dict_mapping_subclass() -> type[abc.Mapping]:
+    """
+    Fixture for a Mapping subclass that is not a dict subclass.
+    """
+
+    class TestNonDictMapping(abc.Mapping):
+        def __init__(self, underlying_dict) -> None:
+            self._data = underlying_dict
+
+        def __getitem__(self, key):
+            return self._data.__getitem__(key)
+
+        def __iter__(self) -> Iterator:
+            return self._data.__iter__()
+
+        def __len__(self) -> int:
+            return self._data.__len__()
+
+    return TestNonDictMapping
+
+
+# ----------------------------------------------------------------
+# Indices
+# ----------------------------------------------------------------
+@pytest.fixture
+def multiindex_year_month_day_dataframe_random_data():
+    """
+    DataFrame with 3-level MultiIndex (year, month, day) covering the
+    first 100 business days from 2000-01-01 with random data.
+    """
+    tdf = DataFrame(
+        np.random.default_rng(2).standard_normal((100, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=100, freq="B"),
+    )
+    ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
+    # use int64 Index, to make sure things work
+    ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
+    ymd.index.set_names(["year", "month", "day"], inplace=True)
+    return ymd
+
+
+@pytest.fixture
+def lexsorted_two_level_string_multiindex() -> MultiIndex:
+    """
+    2-level MultiIndex, lexsorted, with string names.
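+
+    Examples
+    --------
+    Illustrative sketch only; ``test_shape_and_names`` is a hypothetical
+    test name, not part of the suite:
+
+    >>> def test_shape_and_names(lexsorted_two_level_string_multiindex):
+    ...     mi = lexsorted_two_level_string_multiindex
+    ...     assert mi.nlevels == 2
+    ...     assert list(mi.names) == ["first", "second"]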
+ """ + return MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + + +@pytest.fixture +def multiindex_dataframe_random_data( + lexsorted_two_level_string_multiindex, +) -> DataFrame: + """DataFrame with 2 level MultiIndex with random data""" + index = lexsorted_two_level_string_multiindex + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=Index(["A", "B", "C"], name="exp"), + ) + + +def _create_multiindex(): + """ + MultiIndex used to test the general functionality of this object + """ + + # See Also: tests.multi.conftest.idx + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + index_names = ["first", "second"] + return MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=index_names, + verify_integrity=False, + ) + + +def _create_mi_with_dt64tz_level(): + """ + MultiIndex with a level that is a tzaware DatetimeIndex. + """ + # GH#8367 round trip with pickle + return MultiIndex.from_product( + [[1, 2], ["a", "b"], date_range("20130101", periods=3, tz="US/Eastern")], + names=["one", "two", "three"], + ) + + +indices_dict = { + "string": Index([f"pandas_{i}" for i in range(100)]), + "datetime": date_range("2020-01-01", periods=100), + "datetime-tz": date_range("2020-01-01", periods=100, tz="US/Pacific"), + "period": period_range("2020-01-01", periods=100, freq="D"), + "timedelta": timedelta_range(start="1 day", periods=100, freq="D"), + "range": RangeIndex(100), + "int8": Index(np.arange(100), dtype="int8"), + "int16": Index(np.arange(100), dtype="int16"), + "int32": Index(np.arange(100), dtype="int32"), + "int64": Index(np.arange(100), dtype="int64"), + "uint8": Index(np.arange(100), dtype="uint8"), + "uint16": Index(np.arange(100), dtype="uint16"), + "uint32": Index(np.arange(100), dtype="uint32"), + "uint64": Index(np.arange(100), dtype="uint64"), + "float32": Index(np.arange(100), dtype="float32"), + "float64": Index(np.arange(100), dtype="float64"), + "bool-object": Index([True, False] * 5, dtype=object), + "bool-dtype": Index([True, False] * 5, dtype=bool), + "complex64": Index( + np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64") + ), + "complex128": Index( + np.arange(100, dtype="complex128") + 1.0j * np.arange(100, dtype="complex128") + ), + "categorical": CategoricalIndex(list("abcd") * 25), + "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)), + "empty": Index([]), + "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), + "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), + "multi": _create_multiindex(), + "repeats": Index([0, 0, 1, 1, 2, 2]), + "nullable_int": Index(np.arange(100), dtype="Int64"), + "nullable_uint": Index(np.arange(100), dtype="UInt16"), + "nullable_float": Index(np.arange(100), dtype="Float32"), + "nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"), + "string-python": Index( + pd.array([f"pandas_{i}" for i in range(100)], dtype="string[python]") + ), +} +if has_pyarrow: + idx = Index(pd.array([f"pandas_{i}" for i in range(100)], dtype="string[pyarrow]")) + indices_dict["string-pyarrow"] = idx + + +@pytest.fixture(params=indices_dict.keys()) +def index(request): + """ + Fixture for many "simple" kinds of indices. 
+
+    These indices are unlikely to cover corner cases, e.g.
+    - no names
+    - no NaTs/NaNs
+    - no values near implementation bounds
+    - ...
+    """
+    # copy to avoid mutation, e.g. setting .name
+    return indices_dict[request.param].copy()
+
+
+# Needed to generate cartesian product of indices
+index_fixture2 = index
+
+
+@pytest.fixture(
+    params=[
+        key for key, value in indices_dict.items() if not isinstance(value, MultiIndex)
+    ]
+)
+def index_flat(request):
+    """
+    Like the ``index`` fixture, but excluding MultiIndex cases.
+    """
+    key = request.param
+    return indices_dict[key].copy()
+
+
+# Alias so we can test with cartesian product of index_flat
+index_flat2 = index_flat
+
+
+@pytest.fixture(
+    params=[
+        key
+        for key, value in indices_dict.items()
+        if not (
+            key.startswith(("int", "uint", "float"))
+            or key in ["range", "empty", "repeats", "bool-dtype"]
+        )
+        and not isinstance(value, MultiIndex)
+    ]
+)
+def index_with_missing(request):
+    """
+    Fixture for indices with missing values.
+
+    Integer-dtype and empty cases are excluded because they cannot hold missing
+    values.
+
+    MultiIndex is excluded because isna() is not defined for MultiIndex.
+    """
+
+    # GH 35538. Use deep copy to avoid an elusive bug on np-dev
+    # GHA pipeline that writes into indices_dict despite copy
+    ind = indices_dict[request.param].copy(deep=True)
+    vals = ind.values.copy()
+    if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
+        # For setting missing values in the top level of MultiIndex
+        vals = ind.tolist()
+        vals[0] = (None,) + vals[0][1:]
+        vals[-1] = (None,) + vals[-1][1:]
+        return MultiIndex.from_tuples(vals)
+    else:
+        vals[0] = None
+        vals[-1] = None
+        return type(ind)(vals)
+
+
+# ----------------------------------------------------------------
+# Series'
+# ----------------------------------------------------------------
+@pytest.fixture
+def string_series() -> Series:
+    """
+    Fixture for Series of floats with Index of unique strings
+    """
+    return Series(
+        np.arange(30, dtype=np.float64) * 1.1,
+        index=Index([f"i_{i}" for i in range(30)], dtype=object),
+        name="series",
+    )
+
+
+@pytest.fixture
+def object_series() -> Series:
+    """
+    Fixture for Series of dtype object with Index of unique strings
+    """
+    data = [f"foo_{i}" for i in range(30)]
+    index = Index([f"bar_{i}" for i in range(30)], dtype=object)
+    return Series(data, index=index, name="objects", dtype=object)
+
+
+@pytest.fixture
+def datetime_series() -> Series:
+    """
+    Fixture for Series of floats with DatetimeIndex
+    """
+    return Series(
+        np.random.default_rng(2).standard_normal(30),
+        index=date_range("2000-01-01", periods=30, freq="B"),
+        name="ts",
+    )
+
+
+def _create_series(index):
+    """Helper for the _series dict"""
+    size = len(index)
+    data = np.random.default_rng(2).standard_normal(size)
+    return Series(data, index=index, name="a", copy=False)
+
+
+_series = {
+    f"series-with-{index_id}-index": _create_series(index)
+    for index_id, index in indices_dict.items()
+}
+
+
+@pytest.fixture
+def series_with_simple_index(index) -> Series:
+    """
+    Fixture for tests on series with changing types of indices.
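+
+    Examples
+    --------
+    Illustrative sketch only; ``test_index_preserved`` is a hypothetical
+    test name, not part of the suite:
+
+    >>> def test_index_preserved(series_with_simple_index):
+    ...     ser = series_with_simple_index
+    ...     # the Series wraps whichever index the ``index`` fixture provided
+    ...     assert len(ser) == len(ser.index)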
+ """ + return _create_series(index) + + +_narrow_series = { + f"{dtype.__name__}-series": Series( + range(30), index=[f"i-{i}" for i in range(30)], name="a", dtype=dtype + ) + for dtype in tm.NARROW_NP_DTYPES +} + + +_index_or_series_objs = {**indices_dict, **_series, **_narrow_series} + + +@pytest.fixture(params=_index_or_series_objs.keys()) +def index_or_series_obj(request): + """ + Fixture for tests on indexes, series and series with a narrow dtype + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_objs[request.param].copy(deep=True) + + +_typ_objects_series = { + f"{dtype.__name__}-series": Series(dtype) for dtype in tm.PYTHON_DATA_TYPES +} + + +_index_or_series_memory_objs = { + **indices_dict, + **_series, + **_narrow_series, + **_typ_objects_series, +} + + +@pytest.fixture(params=_index_or_series_memory_objs.keys()) +def index_or_series_memory_obj(request): + """ + Fixture for tests on indexes, series, series with a narrow dtype and + series with empty objects type + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_memory_objs[request.param].copy(deep=True) + + +# ---------------------------------------------------------------- +# DataFrames +# ---------------------------------------------------------------- +@pytest.fixture +def int_frame() -> DataFrame: + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + """ + return DataFrame( + np.ones((30, 4), dtype=np.int64), + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + columns=Index(list("ABCD"), dtype=object), + ) + + +@pytest.fixture +def float_frame() -> DataFrame: + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. + """ + return DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + index=Index([f"foo_{i}" for i in range(30)]), + columns=Index(list("ABCD")), + ) + + +@pytest.fixture +def rand_series_with_duplicate_datetimeindex() -> Series: + """ + Fixture for Series with a DatetimeIndex that has duplicates. + """ + dates = [ + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + + return Series(np.random.default_rng(2).standard_normal(len(dates)), index=dates) + + +# ---------------------------------------------------------------- +# Scalars +# ---------------------------------------------------------------- +@pytest.fixture( + params=[ + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), + (Period("2012-01", freq="M"), "period[M]"), + (Period("2012-02-01", freq="D"), "period[D]"), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + (Timedelta(seconds=500), "timedelta64[ns]"), + ] +) +def ea_scalar_and_dtype(request): + return request.param + + +# ---------------------------------------------------------------- +# Operators & Operations +# ---------------------------------------------------------------- + + +@pytest.fixture(params=tm.arithmetic_dunder_methods) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations. 
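+
+    Examples
+    --------
+    Illustrative sketch only; ``test_dunder_exists`` is a hypothetical test
+    name, not part of the suite:
+
+    >>> def test_dunder_exists(all_arithmetic_operators):
+    ...     # each parametrization is a dunder name such as "__add__"
+    ...     assert hasattr(pd.Series([1, 2, 3]), all_arithmetic_operators)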
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        operator.add,
+        ops.radd,
+        operator.sub,
+        ops.rsub,
+        operator.mul,
+        ops.rmul,
+        operator.truediv,
+        ops.rtruediv,
+        operator.floordiv,
+        ops.rfloordiv,
+        operator.mod,
+        ops.rmod,
+        operator.pow,
+        ops.rpow,
+        operator.eq,
+        operator.ne,
+        operator.lt,
+        operator.le,
+        operator.gt,
+        operator.ge,
+        operator.and_,
+        ops.rand_,
+        operator.xor,
+        ops.rxor,
+        operator.or_,
+        ops.ror_,
+    ]
+)
+def all_binary_operators(request):
+    """
+    Fixture for operator and roperator arithmetic, comparison, and logical ops.
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        operator.add,
+        ops.radd,
+        operator.sub,
+        ops.rsub,
+        operator.mul,
+        ops.rmul,
+        operator.truediv,
+        ops.rtruediv,
+        operator.floordiv,
+        ops.rfloordiv,
+        operator.mod,
+        ops.rmod,
+        operator.pow,
+        ops.rpow,
+    ]
+)
+def all_arithmetic_functions(request):
+    """
+    Fixture for operator and roperator arithmetic functions.
+
+    Notes
+    -----
+    Unlike all_arithmetic_operators, this fixture provides the operator
+    functions themselves rather than dunder-method names. divmod and
+    rdivmod are not included.
+    """
+    return request.param
+
+
+_all_numeric_reductions = [
+    "count",
+    "sum",
+    "max",
+    "min",
+    "mean",
+    "prod",
+    "std",
+    "var",
+    "median",
+    "kurt",
+    "skew",
+    "sem",
+]
+
+
+@pytest.fixture(params=_all_numeric_reductions)
+def all_numeric_reductions(request):
+    """
+    Fixture for numeric reduction names.
+    """
+    return request.param
+
+
+_all_boolean_reductions = ["all", "any"]
+
+
+@pytest.fixture(params=_all_boolean_reductions)
+def all_boolean_reductions(request):
+    """
+    Fixture for boolean reduction names.
+    """
+    return request.param
+
+
+_all_reductions = _all_numeric_reductions + _all_boolean_reductions
+
+
+@pytest.fixture(params=_all_reductions)
+def all_reductions(request):
+    """
+    Fixture for all (boolean + numeric) reduction names.
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        operator.eq,
+        operator.ne,
+        operator.gt,
+        operator.ge,
+        operator.lt,
+        operator.le,
+    ]
+)
+def comparison_op(request):
+    """
+    Fixture for operator module comparison functions.
+    """
+    return request.param
+
+
+@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
+def compare_operators_no_eq_ne(request):
+    """
+    Fixture for dunder names for compare operations except == and !=.
+
+    * >=
+    * >
+    * <
+    * <=
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
+)
+def all_logical_operators(request):
+    """
+    Fixture for dunder names for common logical operations.
+
+    * |
+    * &
+    * ^
+    """
+    return request.param
+
+
+_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"]
+
+
+@pytest.fixture(params=_all_numeric_accumulations)
+def all_numeric_accumulations(request):
+    """
+    Fixture for numeric accumulation names.
+    """
+    return request.param
+
+
+# ----------------------------------------------------------------
+# Data sets/files
+# ----------------------------------------------------------------
+@pytest.fixture
+def strict_data_files(pytestconfig):
+    """
+    Return the value of the `--no-strict-data-files` command-line option.
+    """
+    return pytestconfig.getoption("--no-strict-data-files")
+
+
+@pytest.fixture
+def datapath(strict_data_files: bool) -> Callable[..., str]:
+    """
+    Get the path to a data file.
+
+    Parameters
+    ----------
+    path : str
+        Path to the file, relative to ``pandas/tests/``
+
+    Returns
+    -------
+    path : str
+        The full path, including ``pandas/tests``.
+ + Raises + ------ + ValueError + If the path doesn't exist and the --no-strict-data-files option is not set. + """ + BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") + + def deco(*args): + path = os.path.join(BASE_PATH, *args) + if not os.path.exists(path): + if strict_data_files: + raise ValueError( + f"Could not find file {path} and --no-strict-data-files is not set." + ) + pytest.skip(f"Could not find {path}.") + return path + + return deco + + +# ---------------------------------------------------------------- +# Time zones +# ---------------------------------------------------------------- +TIMEZONES = [ + None, + "UTC", + "US/Eastern", + "Asia/Tokyo", + "dateutil/US/Pacific", + "dateutil/Asia/Singapore", + "+01:15", + "-02:15", + "UTC+01:15", + "UTC-02:15", + tzutc(), + tzlocal(), + FixedOffset(300), + FixedOffset(0), + FixedOffset(-300), + timezone.utc, + timezone(timedelta(hours=1)), + timezone(timedelta(hours=-1), name="foo"), +] +if zoneinfo is not None: + TIMEZONES.extend( + [ + zoneinfo.ZoneInfo("US/Pacific"), # type: ignore[list-item] + zoneinfo.ZoneInfo("UTC"), # type: ignore[list-item] + ] + ) +TIMEZONE_IDS = [repr(i) for i in TIMEZONES] + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS)) +@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS) +def tz_naive_fixture(request): + """ + Fixture for trying timezones including default (None): {0} + """ + return request.param + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:])) +@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:]) +def tz_aware_fixture(request): + """ + Fixture for trying explicit timezones: {0} + """ + return request.param + + +# Generate cartesian product of tz_aware_fixture: +tz_aware_fixture2 = tz_aware_fixture + + +_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc] +if zoneinfo is not None: + _UTCS.append(zoneinfo.ZoneInfo("UTC")) + + +@pytest.fixture(params=_UTCS) +def utc_fixture(request): + """ + Fixture to provide variants of UTC timezone strings and tzinfo objects. + """ + return request.param + + +utc_fixture2 = utc_fixture + + +@pytest.fixture(params=["s", "ms", "us", "ns"]) +def unit(request): + """ + datetime64 units we support. + """ + return request.param + + +unit2 = unit + + +# ---------------------------------------------------------------- +# Dtypes +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.STRING_DTYPES) +def string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * str + * 'str' + * 'U' + """ + return request.param + + +@pytest.fixture( + params=[ + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ] +) +def nullable_string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + pytest.param("pyarrow_numpy", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + * 'pyarrow_numpy' + """ + return request.param + + +@pytest.fixture( + params=[ + "numpy_nullable", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def dtype_backend(request): + """ + Parametrized fixture for pd.options.mode.string_storage. 
+ + * 'python' + * 'pyarrow' + """ + return request.param + + +# Alias so we can test with cartesian product of string_storage +string_storage2 = string_storage + + +@pytest.fixture(params=tm.BYTES_DTYPES) +def bytes_dtype(request): + """ + Parametrized fixture for bytes dtypes. + + * bytes + * 'bytes' + """ + return request.param + + +@pytest.fixture(params=tm.OBJECT_DTYPES) +def object_dtype(request): + """ + Parametrized fixture for object dtypes. + + * object + * 'object' + """ + return request.param + + +@pytest.fixture( + params=[ + "object", + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ] +) +def any_string_dtype(request): + """ + Parametrized fixture for string dtypes. + * 'object' + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.DATETIME64_DTYPES) +def datetime64_dtype(request): + """ + Parametrized fixture for datetime64 dtypes. + + * 'datetime64[ns]' + * 'M8[ns]' + """ + return request.param + + +@pytest.fixture(params=tm.TIMEDELTA64_DTYPES) +def timedelta64_dtype(request): + """ + Parametrized fixture for timedelta64 dtypes. + + * 'timedelta64[ns]' + * 'm8[ns]' + """ + return request.param + + +@pytest.fixture +def fixed_now_ts() -> Timestamp: + """ + Fixture emits fixed Timestamp.now() + """ + return Timestamp( # pyright: ignore[reportGeneralTypeIssues] + year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22 + ) + + +@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES) +def float_numpy_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.FLOAT_EA_DTYPES) +def float_ea_dtype(request): + """ + Parameterized fixture for float dtypes. + + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_FLOAT_DTYPES) +def any_float_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.COMPLEX_DTYPES) +def complex_dtype(request): + """ + Parameterized fixture for complex dtypes. + + * complex + * 'complex64' + * 'complex128' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES) +def any_signed_int_numpy_dtype(request): + """ + Parameterized fixture for signed integer dtypes. + + * int + * 'int8' + * 'int16' + * 'int32' + * 'int64' + """ + return request.param + + +@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES) +def any_unsigned_int_numpy_dtype(request): + """ + Parameterized fixture for unsigned integer dtypes. + + * 'uint8' + * 'uint16' + * 'uint32' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES) +def any_int_numpy_dtype(request): + """ + Parameterized fixture for any integer dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES) +def any_int_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_DTYPES) +def any_int_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. 
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES) +def any_numeric_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any], +# Type[object]]]" and "List[str]") +@pytest.fixture( + params=tm.ALL_INT_EA_DTYPES + + tm.FLOAT_EA_DTYPES + + tm.ALL_INT_PYARROW_DTYPES_STR_REPR + + tm.FLOAT_PYARROW_DTYPES_STR_REPR # type: ignore[operator] +) +def any_numeric_ea_and_arrow_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + * 'uint8[pyarrow]' + * 'int8[pyarrow]' + * 'uint16[pyarrow]' + * 'int16[pyarrow]' + * 'uint32[pyarrow]' + * 'int32[pyarrow]' + * 'uint64[pyarrow]' + * 'int64[pyarrow]' + * 'float32[pyarrow]' + * 'float64[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES) +def any_signed_int_ea_dtype(request): + """ + Parameterized fixture for any signed nullable integer dtype. + + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES) +def any_real_numpy_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_DTYPES) +def any_real_numeric_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + + and associated ea dtypes. + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. + + * bool + * 'bool' + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * str + * 'str' + * 'U' + * bytes + * 'bytes' + * 'datetime64[ns]' + * 'M8[ns]' + * 'timedelta64[ns]' + * 'm8[ns]' + * object + * 'object' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NULLABLE_DTYPES) +def any_real_nullable_dtype(request): + """ + Parameterized fixture for all real dtypes that can hold NA. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + * 'UInt8' + * 'UInt16' + * 'UInt32' + * 'UInt64' + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + * 'uint8[pyarrow]' + * 'uint16[pyarrow]' + * 'uint32[pyarrow]' + * 'uint64[pyarrow]' + * 'int8[pyarrow]' + * 'int16[pyarrow]' + * 'int32[pyarrow]' + * 'int64[pyarrow]' + * 'float[pyarrow]' + * 'double[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES) +def any_numeric_dtype(request): + """ + Parameterized fixture for all numeric dtypes. 
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# categoricals are handled separately +_any_skipna_inferred_dtype = [ + ("string", ["a", np.nan, "c"]), + ("string", ["a", pd.NA, "c"]), + ("mixed", ["a", pd.NaT, "c"]), # pd.NaT not considered valid by is_string_array + ("bytes", [b"a", np.nan, b"c"]), + ("empty", [np.nan, np.nan, np.nan]), + ("empty", []), + ("mixed-integer", ["a", np.nan, 2]), + ("mixed", ["a", np.nan, 2.0]), + ("floating", [1.0, np.nan, 2.0]), + ("integer", [1, np.nan, 2]), + ("mixed-integer-float", [1, np.nan, 2.0]), + ("decimal", [Decimal(1), np.nan, Decimal(2)]), + ("boolean", [True, np.nan, False]), + ("boolean", [True, pd.NA, False]), + ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]), + ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]), + ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]), + ("complex", [1 + 1j, np.nan, 2 + 2j]), + # The following dtype is commented out due to GH 23554 + # ('timedelta64', [np.timedelta64(1, 'D'), + # np.nan, np.timedelta64(2, 'D')]), + ("timedelta", [timedelta(1), np.nan, timedelta(2)]), + ("time", [time(1), np.nan, time(2)]), + ("period", [Period(2013), pd.NaT, Period(2018)]), + ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]), +] +ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id + + +@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids) +def any_skipna_inferred_dtype(request): + """ + Fixture for all inferred dtypes from _libs.lib.infer_dtype + + The covered (inferred) types are: + * 'string' + * 'empty' + * 'bytes' + * 'mixed' + * 'mixed-integer' + * 'mixed-integer-float' + * 'floating' + * 'integer' + * 'decimal' + * 'boolean' + * 'datetime64' + * 'datetime' + * 'date' + * 'timedelta' + * 'time' + * 'period' + * 'interval' + + Returns + ------- + inferred_dtype : str + The string for the inferred dtype from _libs.lib.infer_dtype + values : np.ndarray + An array of object dtype that will be inferred to have + `inferred_dtype` + + Examples + -------- + >>> from pandas._libs import lib + >>> + >>> def test_something(any_skipna_inferred_dtype): + ... inferred_dtype, values = any_skipna_inferred_dtype + ... # will pass + ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype + """ + inferred_dtype, values = request.param + values = np.array(values, dtype=object) # object dtype to avoid casting + + # correctness of inference tested in tests/dtypes/test_inference.py + return inferred_dtype, values + + +# ---------------------------------------------------------------- +# Misc +# ---------------------------------------------------------------- +@pytest.fixture +def ip(): + """ + Get an instance of IPython.InteractiveShell. + + Will raise a skip if IPython is not installed. 
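+
+    Examples
+    --------
+    Illustrative sketch only; ``test_run_cell`` is a hypothetical test name,
+    not part of the suite:
+
+    >>> def test_run_cell(ip):
+    ...     # execute a statement inside the in-memory IPython shell
+    ...     result = ip.run_cell("import pandas as pd")
+    ...     assert result.success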
+ """ + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.interactiveshell import InteractiveShell + + # GH#35711 make sure sqlite history file handle is not leaked + from traitlets.config import Config # isort:skip + + c = Config() + c.HistoryManager.hist_file = ":memory:" + + return InteractiveShell(config=c) + + +@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) +def spmatrix(request): + """ + Yields scipy sparse matrix classes. + """ + sparse = pytest.importorskip("scipy.sparse") + + return getattr(sparse, request.param + "_matrix") + + +@pytest.fixture( + params=[ + getattr(pd.offsets, o) + for o in pd.offsets.__all__ + if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick" + ] +) +def tick_classes(request): + """ + Fixture for Tick based datetime offsets available for a time series. + """ + return request.param + + +@pytest.fixture(params=[None, lambda x: x]) +def sort_by_key(request): + """ + Simple fixture for testing keys in sorting methods. + Tests None (no key) and the identity key. + """ + return request.param + + +@pytest.fixture( + params=[ + ("foo", None, None), + ("Egon", "Venkman", None), + ("NCC1701D", "NCC1701D", "NCC1701D"), + # possibly-matching NAs + (np.nan, np.nan, np.nan), + (np.nan, pd.NaT, None), + (np.nan, pd.NA, None), + (pd.NA, pd.NA, pd.NA), + ] +) +def names(request) -> tuple[Hashable, Hashable, Hashable]: + """ + A 3-tuple of names, the first two for operands, the last for a result. + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc]) +def indexer_sli(request): + """ + Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.loc, tm.iloc]) +def indexer_li(request): + """ + Parametrize over loc.__getitem__, iloc.__getitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.iloc]) +def indexer_si(request): + """ + Parametrize over __setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc]) +def indexer_sl(request): + """ + Parametrize over __setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.at, tm.loc]) +def indexer_al(request): + """ + Parametrize over at.__setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.iat, tm.iloc]) +def indexer_ial(request): + """ + Parametrize over iat.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture +def using_array_manager() -> bool: + """ + Fixture to check if the array manager is being used. + """ + return _get_option("mode.data_manager", silent=True) == "array" + + +@pytest.fixture +def using_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is enabled. + """ + return ( + pd.options.mode.copy_on_write is True + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def warn_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is in warning mode. + """ + return ( + pd.options.mode.copy_on_write == "warn" + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def using_infer_string() -> bool: + """ + Fixture to check if infer string option is enabled. 
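+
+    Examples
+    --------
+    Illustrative sketch only; ``test_string_dtype_inference`` is a
+    hypothetical test name, not part of the suite:
+
+    >>> def test_string_dtype_inference(using_infer_string):
+    ...     ser = pd.Series(["a", "b"])
+    ...     if using_infer_string:
+    ...         assert ser.dtype != object
+    ...     else:
+    ...         assert ser.dtype == object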
+ """ + return pd.options.future.infer_string is True + + +warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"] +if zoneinfo is not None: + warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw")) # type: ignore[arg-type] + + +@pytest.fixture(params=warsaws) +def warsaw(request) -> str: + """ + tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo. + """ + return request.param + + +@pytest.fixture() +def arrow_string_storage(): + return ("pyarrow", "pyarrow_numpy") diff --git a/venv/lib/python3.10/site-packages/pandas/pyproject.toml b/venv/lib/python3.10/site-packages/pandas/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..db9f055799ab05ad59119c7786310e7fe8d37be5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/pyproject.toml @@ -0,0 +1,801 @@ +[build-system] +# Minimum requirements for the build system to execute. +# See https://github.com/scipy/scipy/pull/12940 for the AIX issue. +requires = [ + "meson-python==0.13.1", + "meson==1.2.1", + "wheel", + "Cython==3.0.5", # Note: sync with setup.py, environment.yml and asv.conf.json + # Force numpy higher than 2.0rc1, so that built wheels are compatible + # with both numpy 1 and 2 + "numpy>=2.0.0rc1", + "versioneer[toml]" +] + +build-backend = "mesonpy" + +[project] +name = 'pandas' +dynamic = [ + 'version' +] +description = 'Powerful data structures for data analysis, time series, and statistics' +readme = 'README.md' +authors = [ + { name = 'The Pandas Development Team', email='pandas-dev@python.org' }, +] +license = {file = 'LICENSE'} +requires-python = '>=3.9' +dependencies = [ + "numpy>=1.22.4; python_version<'3.11'", + "numpy>=1.23.2; python_version=='3.11'", + "numpy>=1.26.0; python_version>='3.12'", + "python-dateutil>=2.8.2", + "pytz>=2020.1", + "tzdata>=2022.7" +] +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: BSD License', + 'Operating System :: OS Independent', + 'Programming Language :: Cython', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + 'Topic :: Scientific/Engineering' +] + +[project.urls] +homepage = 'https://pandas.pydata.org' +documentation = 'https://pandas.pydata.org/docs/' +repository = 'https://github.com/pandas-dev/pandas' + +[project.entry-points."pandas_plotting_backends"] +matplotlib = "pandas:plotting._matplotlib" + +[project.optional-dependencies] +test = ['hypothesis>=6.46.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0'] +pyarrow = ['pyarrow>=10.0.1'] +performance = ['bottleneck>=1.3.6', 'numba>=0.56.4', 'numexpr>=2.8.4'] +computation = ['scipy>=1.10.0', 'xarray>=2022.12.0'] +fss = ['fsspec>=2022.11.0'] +aws = ['s3fs>=2022.11.0'] +gcp = ['gcsfs>=2022.11.0', 'pandas-gbq>=0.19.0'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.1.0', 'python-calamine>=0.1.7', 'pyxlsb>=1.0.10', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.5'] +parquet = ['pyarrow>=10.0.1'] +feather = ['pyarrow>=10.0.1'] +hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.20.1', + 'tables>=3.8.0'] +spss = ['pyreadstat>=1.2.0'] +postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.8.0'] +mysql = ['SQLAlchemy>=2.0.0', 'pymysql>=1.0.2'] +sql-other = ['SQLAlchemy>=2.0.0', 
'adbc-driver-postgresql>=0.8.0', 'adbc-driver-sqlite>=0.8.0'] +html = ['beautifulsoup4>=4.11.2', 'html5lib>=1.1', 'lxml>=4.9.2'] +xml = ['lxml>=4.9.2'] +plot = ['matplotlib>=3.6.3'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.9.0'] +clipboard = ['PyQt5>=5.15.9', 'qtpy>=2.3.0'] +compression = ['zstandard>=0.19.0'] +consortium-standard = ['dataframe-api-compat>=0.1.7'] +all = ['adbc-driver-postgresql>=0.8.0', + 'adbc-driver-sqlite>=0.8.0', + 'beautifulsoup4>=4.11.2', + # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.21.3', + 'bottleneck>=1.3.6', + 'dataframe-api-compat>=0.1.7', + 'fastparquet>=2022.12.0', + 'fsspec>=2022.11.0', + 'gcsfs>=2022.11.0', + 'html5lib>=1.1', + 'hypothesis>=6.46.1', + 'jinja2>=3.1.2', + 'lxml>=4.9.2', + 'matplotlib>=3.6.3', + 'numba>=0.56.4', + 'numexpr>=2.8.4', + 'odfpy>=1.4.1', + 'openpyxl>=3.1.0', + 'pandas-gbq>=0.19.0', + 'psycopg2>=2.9.6', + 'pyarrow>=10.0.1', + 'pymysql>=1.0.2', + 'PyQt5>=5.15.9', + 'pyreadstat>=1.2.0', + 'pytest>=7.3.2', + 'pytest-xdist>=2.2.0', + 'python-calamine>=0.1.7', + 'pyxlsb>=1.0.10', + 'qtpy>=2.3.0', + 'scipy>=1.10.0', + 's3fs>=2022.11.0', + 'SQLAlchemy>=2.0.0', + 'tables>=3.8.0', + 'tabulate>=0.9.0', + 'xarray>=2022.12.0', + 'xlrd>=2.0.1', + 'xlsxwriter>=3.0.5', + 'zstandard>=0.19.0'] + +# TODO: Remove after setuptools support is dropped. +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +include = ["pandas", "pandas.*"] +namespaces = false + +[tool.setuptools.exclude-package-data] +"*" = ["*.c", "*.h"] + +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. +[tool.versioneer] +VCS = "git" +style = "pep440" +versionfile_source = "pandas/_version.py" +versionfile_build = "pandas/_version.py" +tag_prefix = "v" +parentdir_prefix = "pandas-" + +[tool.meson-python.args] +setup = ['--vsenv'] # For Windows + +[tool.cibuildwheel] +skip = "cp36-* cp37-* cp38-* pp* *_i686 *_ppc64le *_s390x" +build-verbosity = "3" +environment = {LDFLAGS="-Wl,--strip-all"} +# TODO: remove this once numpy 2.0 proper releases +# and specify numpy 2.0 as a dependency in [build-system] requires in pyproject.toml +before-build = "pip install numpy==2.0.0rc1" +test-requires = "hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0" +test-command = """ + PANDAS_CI='1' python -c 'import pandas as pd; \ + pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2", "--no-strict-data-files"]); \ + pd.test(extra_args=["-m not clipboard and single_cpu and not slow and not network and not db", "--no-strict-data-files"]);' \ + """ + +[tool.cibuildwheel.windows] +# TODO: remove this once numpy 2.0 proper releases +# and specify numpy 2.0 as a dependency in [build-system] requires in pyproject.toml +before-build = "pip install delvewheel numpy==2.0.0rc1" +repair-wheel-command = "delvewheel repair -w {dest_dir} {wheel}" + +[[tool.cibuildwheel.overrides]] +select = "*-musllinux*" +before-test = "apk update && apk add musl-locales" + +[[tool.cibuildwheel.overrides]] +select = "*-win*" +# We test separately for Windows, since we use +# the windowsservercore docker image to check if any dlls are +# missing from the wheel +test-command = "" + +[[tool.cibuildwheel.overrides]] +# Don't strip wheels on macOS. 
+# macOS doesn't support stripping wheels with linker +# https://github.com/MacPython/numpy-wheels/pull/87#issuecomment-624878264 +select = "*-macosx*" +environment = {CFLAGS="-g0"} + +[tool.black] +target-version = ['py39', 'py310'] +required-version = '23.11.0' +exclude = ''' +( + asv_bench/env + | \.egg + | \.git + | \.hg + | \.mypy_cache + | \.nox + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | setup.py +) +''' + +[tool.ruff] +line-length = 88 +target-version = "py310" +fix = true +unfixable = [] +typing-modules = ["pandas._typing"] + +select = [ + # pyflakes + "F", + # pycodestyle + "E", "W", + # flake8-2020 + "YTT", + # flake8-bugbear + "B", + # flake8-quotes + "Q", + # flake8-debugger + "T10", + # flake8-gettext + "INT", + # pylint + "PL", + # misc lints + "PIE", + # flake8-pyi + "PYI", + # tidy imports + "TID", + # implicit string concatenation + "ISC", + # type-checking imports + "TCH", + # comprehensions + "C4", + # pygrep-hooks + "PGH", + # Ruff-specific rules + "RUF", + # flake8-bandit: exec-builtin + "S102", + # numpy-legacy-random + "NPY002", + # Perflint + "PERF", + # flynt + "FLY", + # flake8-logging-format + "G", + # flake8-future-annotations + "FA", +] + +ignore = [ + ### Intentionally disabled + # space before : (needed for how black formats slicing) + "E203", + # module level import not at top of file + "E402", + # do not assign a lambda expression, use a def + "E731", + # line break before binary operator + # "W503", # not yet implemented + # line break after binary operator + # "W504", # not yet implemented + # controversial + "B006", + # controversial + "B007", + # controversial + "B008", + # setattr is used to side-step mypy + "B009", + # getattr is used to side-step mypy + "B010", + # tests use assert False + "B011", + # tests use comparisons but not their returned value + "B015", + # false positives + "B019", + # Loop control variable overrides iterable it iterates + "B020", + # Function definition does not bind loop variable + "B023", + # Functions defined inside a loop must not use variables redefined in the loop + # "B301", # not yet implemented + # Only works with python >=3.10 + "B905", + # Too many arguments to function call + "PLR0913", + # Too many returns + "PLR0911", + # Too many branches + "PLR0912", + # Too many statements + "PLR0915", + # Redefined loop name + "PLW2901", + # Global statements are discouraged + "PLW0603", + # Docstrings should not be included in stubs + "PYI021", + # Use `typing.NamedTuple` instead of `collections.namedtuple` + "PYI024", + # No builtin `eval()` allowed + "PGH001", + # compare-to-empty-string + "PLC1901", + # while int | float can be shortened to float, the former is more explicit + "PYI041", + # incorrect-dict-iterator, flags valid Series.items usage + "PERF102", + # try-except-in-loop, becomes useless in Python 3.11 + "PERF203", + + + ### TODO: Enable gradually + # Useless statement + "B018", + # Within an except clause, raise exceptions with ... 
+ "B904", + # Magic number + "PLR2004", + # comparison-with-itself + "PLR0124", + # Consider `elif` instead of `else` then `if` to remove indentation level + "PLR5501", + # collection-literal-concatenation + "RUF005", + # pairwise-over-zipped (>=PY310 only) + "RUF007", + # explicit-f-string-type-conversion + "RUF010", + # mutable-class-default + "RUF012" +] + +exclude = [ + "doc/sphinxext/*.py", + "doc/build/*.py", + "doc/temp/*.py", + ".eggs/*.py", + # vendored files + "pandas/util/version/*", + "pandas/io/clipboard/__init__.py", + # exclude asv benchmark environments from linting + "env", +] + +[tool.ruff.per-file-ignores] +# relative imports allowed for asv_bench +"asv_bench/*" = ["TID", "NPY002"] +# to be enabled gradually +"pandas/core/*" = ["PLR5501"] +"pandas/tests/*" = ["B028", "FLY"] +"scripts/*" = ["B028"] +# Keep this one enabled +"pandas/_typing.py" = ["TCH"] + +[tool.pylint.messages_control] +max-line-length = 88 +disable = [ + # intentionally turned off + "bad-mcs-classmethod-argument", + "broad-except", + "c-extension-no-member", + "comparison-with-itself", + "consider-using-enumerate", + "import-error", + "import-outside-toplevel", + "invalid-name", + "invalid-unary-operand-type", + "line-too-long", + "no-else-continue", + "no-else-raise", + "no-else-return", + "no-member", + "no-name-in-module", + "not-an-iterable", + "overridden-final-method", + "pointless-statement", + "redundant-keyword-arg", + "singleton-comparison", + "too-many-ancestors", + "too-many-arguments", + "too-many-boolean-expressions", + "too-many-branches", + "too-many-function-args", + "too-many-instance-attributes", + "too-many-locals", + "too-many-nested-blocks", + "too-many-public-methods", + "too-many-return-statements", + "too-many-statements", + "unexpected-keyword-arg", + "ungrouped-imports", + "unsubscriptable-object", + "unsupported-assignment-operation", + "unsupported-membership-test", + "unused-import", + "use-dict-literal", + "use-implicit-booleaness-not-comparison", + "use-implicit-booleaness-not-len", + "wrong-import-order", + "wrong-import-position", + "redefined-loop-name", + + # misc + "abstract-class-instantiated", + "no-value-for-parameter", + "undefined-variable", + "unpacking-non-sequence", + "used-before-assignment", + + # pylint type "C": convention, for programming standard violation + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", + "superfluous-parens", + "too-many-lines", + "unidiomatic-typecheck", + "unnecessary-dunder-call", + "unnecessary-lambda-assignment", + + # pylint type "R": refactor, for bad code smell + "consider-using-with", + "cyclic-import", + "duplicate-code", + "inconsistent-return-statements", + "redefined-argument-from-local", + "too-few-public-methods", + + # pylint type "W": warning, for python specific problems + "abstract-method", + "arguments-differ", + "arguments-out-of-order", + "arguments-renamed", + "attribute-defined-outside-init", + "broad-exception-raised", + "comparison-with-callable", + "dangerous-default-value", + "deprecated-module", + "eval-used", + "expression-not-assigned", + "fixme", + "global-statement", + "invalid-overridden-method", + "keyword-arg-before-vararg", + "possibly-unused-variable", + "protected-access", + "raise-missing-from", + "redefined-builtin", + "redefined-outer-name", + "self-cls-assignment", + "signature-differs", + "super-init-not-called", + "try-except-raise", + "unnecessary-lambda", + "unused-argument", + "unused-variable", + "using-constant-test" +] + 
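+# Illustrative usage note (not from the upstream config): the markers
+# declared in [tool.pytest.ini_options] below can be combined with pytest's
+# -m option, e.g.
+#   pytest pandas -m "not slow and not network and not db" -n 4
+# where -n requires the pytest-xdist test dependency.
+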
+[tool.pytest.ini_options] +# sync minversion with pyproject.toml & install.rst +minversion = "7.3.2" +addopts = "--strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" +empty_parameter_set_mark = "fail_at_collect" +xfail_strict = true +testpaths = "pandas" +doctest_optionflags = [ + "NORMALIZE_WHITESPACE", + "IGNORE_EXCEPTION_DETAIL", + "ELLIPSIS", +] +filterwarnings = [ + "error:::pandas", + "error::ResourceWarning", + "error::pytest.PytestUnraisableExceptionWarning", + # TODO(PY311-minimum): Specify EncodingWarning + # Ignore 3rd party EncodingWarning but raise on pandas' + "ignore:.*encoding.* argument not specified", + "error:.*encoding.* argument not specified::pandas", + "ignore:.*ssl.SSLSocket:pytest.PytestUnraisableExceptionWarning", + "ignore:.*ssl.SSLSocket:ResourceWarning", + # GH 44844: Can remove once minimum matplotlib version >= 3.7 + "ignore:.*FileIO:pytest.PytestUnraisableExceptionWarning", + "ignore:.*BufferedRandom:ResourceWarning", + "ignore::ResourceWarning:asyncio", + # From plotting doctests + "ignore:More than 20 figures have been opened:RuntimeWarning", + # Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758 + "ignore:`np.MachAr` is deprecated:DeprecationWarning:numba", + "ignore:.*urllib3:DeprecationWarning:botocore", + "ignore:Setuptools is replacing distutils.:UserWarning:_distutils_hack", + # https://github.com/PyTables/PyTables/issues/822 + "ignore:a closed node found in the registry:UserWarning:tables", + "ignore:`np.object` is a deprecated:DeprecationWarning:tables", + "ignore:tostring:DeprecationWarning:tables", + "ignore:distutils Version classes are deprecated:DeprecationWarning:pandas_datareader", + "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec", + # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged + "ignore:.*In the future `np.long` will be defined as.*:FutureWarning", +] +junit_family = "xunit2" +markers = [ + "single_cpu: tests that should run on a single cpu only", + "slow: mark a test as slow", + "network: mark a test as network", + "db: tests requiring a database (mysql or postgres)", + "clipboard: mark a pd.read_clipboard test", + "arm_slow: mark a test as slow for arm64 architecture", + "skip_ubsan: Tests known to fail UBSAN check", +] + +[tool.mypy] +# Import discovery +mypy_path = "typings" +files = ["pandas", "typings"] +namespace_packages = false +explicit_package_bases = false +ignore_missing_imports = true +follow_imports = "normal" +follow_imports_for_stubs = false +no_site_packages = false +no_silence_site_packages = false +# Platform configuration +python_version = "3.11" +platform = "linux-64" +# Disallow dynamic typing +disallow_any_unimported = false # TODO +disallow_any_expr = false # TODO +disallow_any_decorated = false # TODO +disallow_any_explicit = false # TODO +disallow_any_generics = false # TODO +disallow_subclassing_any = false # TODO +# Untyped definitions and calls +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +# None and Optional handling +no_implicit_optional = true +strict_optional = true +# Configuring warnings +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_return_any = false # TODO +warn_unreachable = false 
# GH#27396 +# Suppressing errors +ignore_errors = false +enable_error_code = "ignore-without-code" +# Miscellaneous strictness flags +allow_untyped_globals = false +allow_redefinition = false +local_partial_types = false +implicit_reexport = true +strict_equality = true +# Configuring error messages +show_error_context = false +show_column_numbers = false +show_error_codes = true + +[[tool.mypy.overrides]] +module = [ + "pandas._config.config", # TODO + "pandas._libs.*", + "pandas._testing.*", # TODO + "pandas.arrays", # TODO + "pandas.compat.numpy.function", # TODO + "pandas.compat._optional", # TODO + "pandas.compat.compressors", # TODO + "pandas.compat.pickle_compat", # TODO + "pandas.core._numba.executor", # TODO + "pandas.core.array_algos.datetimelike_accumulations", # TODO + "pandas.core.array_algos.masked_accumulations", # TODO + "pandas.core.array_algos.masked_reductions", # TODO + "pandas.core.array_algos.putmask", # TODO + "pandas.core.array_algos.quantile", # TODO + "pandas.core.array_algos.replace", # TODO + "pandas.core.array_algos.take", # TODO + "pandas.core.arrays.*", # TODO + "pandas.core.computation.*", # TODO + "pandas.core.dtypes.astype", # TODO + "pandas.core.dtypes.cast", # TODO + "pandas.core.dtypes.common", # TODO + "pandas.core.dtypes.concat", # TODO + "pandas.core.dtypes.dtypes", # TODO + "pandas.core.dtypes.generic", # TODO + "pandas.core.dtypes.inference", # TODO + "pandas.core.dtypes.missing", # TODO + "pandas.core.groupby.categorical", # TODO + "pandas.core.groupby.generic", # TODO + "pandas.core.groupby.grouper", # TODO + "pandas.core.groupby.groupby", # TODO + "pandas.core.groupby.ops", # TODO + "pandas.core.indexers.*", # TODO + "pandas.core.indexes.*", # TODO + "pandas.core.interchange.column", # TODO + "pandas.core.interchange.dataframe_protocol", # TODO + "pandas.core.interchange.from_dataframe", # TODO + "pandas.core.internals.*", # TODO + "pandas.core.methods.*", # TODO + "pandas.core.ops.array_ops", # TODO + "pandas.core.ops.common", # TODO + "pandas.core.ops.invalid", # TODO + "pandas.core.ops.mask_ops", # TODO + "pandas.core.ops.missing", # TODO + "pandas.core.reshape.*", # TODO + "pandas.core.strings.*", # TODO + "pandas.core.tools.*", # TODO + "pandas.core.window.common", # TODO + "pandas.core.window.ewm", # TODO + "pandas.core.window.expanding", # TODO + "pandas.core.window.numba_", # TODO + "pandas.core.window.online", # TODO + "pandas.core.window.rolling", # TODO + "pandas.core.accessor", # TODO + "pandas.core.algorithms", # TODO + "pandas.core.apply", # TODO + "pandas.core.arraylike", # TODO + "pandas.core.base", # TODO + "pandas.core.common", # TODO + "pandas.core.config_init", # TODO + "pandas.core.construction", # TODO + "pandas.core.flags", # TODO + "pandas.core.frame", # TODO + "pandas.core.generic", # TODO + "pandas.core.indexing", # TODO + "pandas.core.missing", # TODO + "pandas.core.nanops", # TODO + "pandas.core.resample", # TODO + "pandas.core.roperator", # TODO + "pandas.core.sample", # TODO + "pandas.core.series", # TODO + "pandas.core.sorting", # TODO + "pandas.errors", # TODO + "pandas.io.clipboard", # TODO + "pandas.io.excel._base", # TODO + "pandas.io.excel._odfreader", # TODO + "pandas.io.excel._odswriter", # TODO + "pandas.io.excel._openpyxl", # TODO + "pandas.io.excel._pyxlsb", # TODO + "pandas.io.excel._xlrd", # TODO + "pandas.io.excel._xlsxwriter", # TODO + "pandas.io.formats.console", # TODO + "pandas.io.formats.css", # TODO + "pandas.io.formats.excel", # TODO + "pandas.io.formats.format", # TODO + 
"pandas.io.formats.info", # TODO + "pandas.io.formats.printing", # TODO + "pandas.io.formats.style", # TODO + "pandas.io.formats.style_render", # TODO + "pandas.io.formats.xml", # TODO + "pandas.io.json.*", # TODO + "pandas.io.parsers.*", # TODO + "pandas.io.sas.sas_xport", # TODO + "pandas.io.sas.sas7bdat", # TODO + "pandas.io.clipboards", # TODO + "pandas.io.common", # TODO + "pandas.io.gbq", # TODO + "pandas.io.html", # TODO + "pandas.io.gbq", # TODO + "pandas.io.parquet", # TODO + "pandas.io.pytables", # TODO + "pandas.io.sql", # TODO + "pandas.io.stata", # TODO + "pandas.io.xml", # TODO + "pandas.plotting.*", # TODO + "pandas.tests.*", + "pandas.tseries.frequencies", # TODO + "pandas.tseries.holiday", # TODO + "pandas.util._decorators", # TODO + "pandas.util._doctools", # TODO + "pandas.util._print_versions", # TODO + "pandas.util._test_decorators", # TODO + "pandas.util._validators", # TODO + "pandas.util", # TODO + "pandas._version", + "pandas.conftest", + "pandas" +] +disallow_untyped_calls = false +disallow_untyped_defs = false +disallow_incomplete_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.*", + "pandas._version", + "pandas.io.clipboard", +] +check_untyped_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.apply.test_series_apply", + "pandas.tests.arithmetic.conftest", + "pandas.tests.arrays.sparse.test_combine_concat", + "pandas.tests.dtypes.test_common", + "pandas.tests.frame.methods.test_to_records", + "pandas.tests.groupby.test_rank", + "pandas.tests.groupby.transform.test_transform", + "pandas.tests.indexes.interval.test_interval", + "pandas.tests.indexing.test_categorical", + "pandas.tests.io.excel.test_writers", + "pandas.tests.reductions.test_reductions", + "pandas.tests.test_expressions", +] +ignore_errors = true + +# To be kept consistent with "Import Formatting" section in contributing.rst +[tool.isort] +known_pre_libs = "pandas._config" +known_pre_core = ["pandas._libs", "pandas._typing", "pandas.util._*", "pandas.compat", "pandas.errors"] +known_dtypes = "pandas.core.dtypes" +known_post_core = ["pandas.tseries", "pandas.io", "pandas.plotting"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY" ,"PRE_LIBS" , "PRE_CORE", "DTYPES", "FIRSTPARTY", "POST_CORE", "LOCALFOLDER"] +profile = "black" +combine_as_imports = true +force_grid_wrap = 2 +force_sort_within_sections = true +skip_glob = "env" +skip = "pandas/__init__.py" + +[tool.pyright] +pythonVersion = "3.11" +typeCheckingMode = "basic" +useLibraryCodeForTypes = false +include = ["pandas", "typings"] +exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version", "pandas/core/_numba/extensions.py"] +# enable subset of "strict" +reportDuplicateImport = true +reportInconsistentConstructor = true +reportInvalidStubStatement = true +reportOverlappingOverload = true +reportPropertyTypeMismatch = true +reportUntypedClassDecorator = true +reportUntypedFunctionDecorator = true +reportUntypedNamedTuple = true +reportUnusedImport = true +disableBytesTypePromotions = true +# disable subset of "basic" +reportGeneralTypeIssues = false +reportMissingModuleSource = false +reportOptionalCall = false +reportOptionalIterable = false +reportOptionalMemberAccess = false +reportOptionalOperand = false +reportOptionalSubscript = false +reportPrivateImportUsage = false +reportUnboundVariable = false + +[tool.coverage.run] +branch = true +omit = ["pandas/_typing.py", "pandas/_version.py"] +plugins = ["Cython.Coverage"] +source = ["pandas"] + +[tool.coverage.report] +ignore_errors = false 
+show_missing = true
+omit = ["pandas/_version.py"]
+exclude_lines = [
+  # Have to re-enable the standard pragma
+  "pragma: no cover",
+  # Don't complain about missing debug-only code:
+  "def __repr__",
+  "if self.debug",
+  # Don't complain if tests don't hit defensive assertion code:
+  "raise AssertionError",
+  "raise NotImplementedError",
+  "AbstractMethodError",
+  # Don't complain if non-runnable code isn't run:
+  "if 0:",
+  "if __name__ == .__main__.:",
+  "if TYPE_CHECKING:",
+]
+
+[tool.coverage.html]
+directory = "coverage_html_report"
+
+[tool.codespell]
+ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, expec, expecs"
+ignore-regex = 'https://([\w/\.])+'
diff --git a/venv/lib/python3.10/site-packages/pandas/testing.py b/venv/lib/python3.10/site-packages/pandas/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..841b55df48556561904b9144a05f747d889ea621
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/testing.py
@@ -0,0 +1,18 @@
+"""
+Public testing utility functions.
+"""
+
+
+from pandas._testing import (
+    assert_extension_array_equal,
+    assert_frame_equal,
+    assert_index_equal,
+    assert_series_equal,
+)
+
+__all__ = [
+    "assert_extension_array_equal",
+    "assert_frame_equal",
+    "assert_series_equal",
+    "assert_index_equal",
+]
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/common.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc41d7907a240f0dd9dc19e0ae1296bee86be421
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/common.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas import (
+    DataFrame,
+    concat,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import AxisInt
+
+
+def _check_mixed_float(df, dtype=None):
+    # float16 values are the most likely to be upcast to float32
+    dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}
+    if isinstance(dtype, str):
+        dtypes = {k: dtype for k, v in dtypes.items()}
+    elif isinstance(dtype, dict):
+        dtypes.update(dtype)
+    if dtypes.get("A"):
+        assert df.dtypes["A"] == dtypes["A"]
+    if dtypes.get("B"):
+        assert df.dtypes["B"] == dtypes["B"]
+    if dtypes.get("C"):
+        assert df.dtypes["C"] == dtypes["C"]
+    if dtypes.get("D"):
+        assert df.dtypes["D"] == dtypes["D"]
+
+
+def _check_mixed_int(df, dtype=None):
+    dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}
+    if isinstance(dtype, str):
+        dtypes = {k: dtype for k, v in dtypes.items()}
+    elif isinstance(dtype, dict):
+        dtypes.update(dtype)
+    if dtypes.get("A"):
+        assert df.dtypes["A"] == dtypes["A"]
+    if dtypes.get("B"):
+        assert df.dtypes["B"] == dtypes["B"]
+    if dtypes.get("C"):
+        assert df.dtypes["C"] == dtypes["C"]
+    if dtypes.get("D"):
+        assert df.dtypes["D"] == dtypes["D"]
+
+
+def zip_frames(frames: list[DataFrame], axis: AxisInt = 1) -> DataFrame:
+    """
+    take a list of frames, zip them together under the
+    assumption that these
all have the first frame's index/columns.
+
+    Returns
+    -------
+    new_frame : DataFrame
+    """
+    if axis == 1:
+        columns = frames[0].columns
+        zipped = [f.loc[:, c] for c in columns for f in frames]
+        return concat(zipped, axis=1)
+    else:
+        index = frames[0].index
+        zipped = [f.loc[i, :] for i in index for f in frames]
+        return DataFrame(zipped)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e07024b2e2a097a8442db2039323f45aa18598de
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/conftest.py
@@ -0,0 +1,100 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    NaT,
+    date_range,
+)
+
+
+@pytest.fixture
+def datetime_frame() -> DataFrame:
+    """
+    Fixture for DataFrame of floats with DatetimeIndex
+
+    Columns are ['A', 'B', 'C', 'D']
+    """
+    return DataFrame(
+        np.random.default_rng(2).standard_normal((100, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=100, freq="B"),
+    )
+
+
+@pytest.fixture
+def float_string_frame():
+    """
+    Fixture for DataFrame of floats and strings with index of unique strings
+
+    Columns are ['A', 'B', 'C', 'D', 'foo'].
+    """
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((30, 4)),
+        index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+        columns=Index(list("ABCD"), dtype=object),
+    )
+    df["foo"] = "bar"
+    return df
+
+
+@pytest.fixture
+def mixed_float_frame():
+    """
+    Fixture for DataFrame of different float types with index of unique strings
+
+    Columns are ['A', 'B', 'C', 'D'].
+    """
+    df = DataFrame(
+        {
+            col: np.random.default_rng(2).random(30, dtype=dtype)
+            for col, dtype in zip(
+                list("ABCD"), ["float32", "float32", "float32", "float64"]
+            )
+        },
+        index=Index([f"foo_{i}" for i in range(30)], dtype=object),
+    )
+    # float16 is not supported by the numpy random generator, so cast afterwards
+    df["C"] = df["C"].astype("float16")
+    return df
+
+
+@pytest.fixture
+def mixed_int_frame():
+    """
+    Fixture for DataFrame of different int types with index of unique strings
+
+    Columns are ['A', 'B', 'C', 'D'].
+ """ + return DataFrame( + { + col: np.ones(30, dtype=dtype) + for col, dtype in zip(list("ABCD"), ["int32", "uint64", "uint8", "int64"]) + }, + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + ) + + +@pytest.fixture +def timezone_frame(): + """ + Fixture for DataFrame of date_range Series with different time zones + + Columns are ['A', 'B', 'C']; some entries are missing + + A B C + 0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00 + 1 2013-01-02 NaT NaT + 2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00 + """ + df = DataFrame( + { + "A": date_range("20130101", periods=3), + "B": date_range("20130101", periods=3, tz="US/Eastern"), + "C": date_range("20130101", periods=3, tz="CET"), + } + ) + df.iloc[1, 1] = NaT + df.iloc[1, 2] = NaT + return df diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_alter_axes.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_alter_axes.py new file mode 100644 index 0000000000000000000000000000000000000000..c68171ab254c7c8582a206a8e9b44b3845c47efc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_alter_axes.py @@ -0,0 +1,30 @@ +from datetime import datetime + +import pytz + +from pandas import DataFrame +import pandas._testing as tm + + +class TestDataFrameAlterAxes: + # Tests for setting index/columns attributes directly (i.e. __setattr__) + + def test_set_axis_setattr_index(self): + # GH 6785 + # set the index manually + + df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}]) + expected = df.set_index("ts") + df.index = df["ts"] + df.pop("ts") + tm.assert_frame_equal(df, expected) + + # Renaming + + def test_assign_columns(self, float_frame): + float_frame["hi"] = "there" + + df = float_frame.copy() + df.columns = ["foo", "bar", "baz", "quux", "foo2"] + tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False) + tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..0593de7556406ab143088a0f450e620653db83a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py @@ -0,0 +1,2136 @@ +from collections import deque +from datetime import ( + datetime, + timezone, +) +from enum import Enum +import functools +import operator +import re + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.computation import expressions as expr +from pandas.tests.frame.common import ( + _check_mixed_float, + _check_mixed_int, +) + + +@pytest.fixture +def simple_frame(): + """ + Fixture for simple 3x3 DataFrame + + Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c']. 
+ + one two three + a 1.0 2.0 3.0 + b 4.0 5.0 6.0 + c 7.0 8.0 9.0 + """ + arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]) + + return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"]) + + +@pytest.fixture(autouse=True, params=[0, 100], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield request.param + + +class DummyElement: + def __init__(self, value, dtype) -> None: + self.value = value + self.dtype = np.dtype(dtype) + + def __array__(self, dtype=None, copy=None): + return np.array(self.value, dtype=self.dtype) + + def __str__(self) -> str: + return f"DummyElement({self.value}, {self.dtype})" + + def __repr__(self) -> str: + return str(self) + + def astype(self, dtype, copy=False): + self.dtype = dtype + return self + + def view(self, dtype): + return type(self)(self.value.view(dtype), dtype) + + def any(self, axis=None): + return bool(self.value) + + +# ------------------------------------------------------------------- +# Comparisons + + +class TestFrameComparisons: + # Specifically _not_ flex-comparisons + + def test_comparison_with_categorical_dtype(self): + # GH#12564 + + df = DataFrame({"A": ["foo", "bar", "baz"]}) + exp = DataFrame({"A": [True, False, False]}) + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + # casting to categorical shouldn't affect the result + df["A"] = df["A"].astype("category") + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + def test_frame_in_list(self): + # GH#12689 this should raise at the DataFrame level, not blocks + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), columns=list("ABCD") + ) + msg = "The truth value of a DataFrame is ambiguous" + with pytest.raises(ValueError, match=msg): + df in [None] + + @pytest.mark.parametrize( + "arg, arg2", + [ + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + [ + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + ], + ) + def test_comparison_invalid(self, arg, arg2): + # GH4968 + # invalid date/int comparisons + x = DataFrame(arg) + y = DataFrame(arg2) + # we expect the result to match Series comparisons for + # == and !=, inequalities should raise + result = x == y + expected = DataFrame( + {col: x[col] == y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + result = x != y + expected = DataFrame( + {col: x[col] != y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + msgs = [ + r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", + "invalid type promotion", + ( + # 
npdev 1.20.0
+                r"The DTypes <class 'numpy.dtype\[.*\]'> and "
+                r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
+            ),
+        ]
+        msg = "|".join(msgs)
+        with pytest.raises(TypeError, match=msg):
+            x >= y
+        with pytest.raises(TypeError, match=msg):
+            x > y
+        with pytest.raises(TypeError, match=msg):
+            x < y
+        with pytest.raises(TypeError, match=msg):
+            x <= y
+
+    @pytest.mark.parametrize(
+        "left, right",
+        [
+            ("gt", "lt"),
+            ("lt", "gt"),
+            ("ge", "le"),
+            ("le", "ge"),
+            ("eq", "eq"),
+            ("ne", "ne"),
+        ],
+    )
+    def test_timestamp_compare(self, left, right):
+        # make sure we can compare Timestamps on the right AND left hand side
+        # GH#4982
+        df = DataFrame(
+            {
+                "dates1": pd.date_range("20010101", periods=10),
+                "dates2": pd.date_range("20010102", periods=10),
+                "intcol": np.random.default_rng(2).integers(1000000000, size=10),
+                "floatcol": np.random.default_rng(2).standard_normal(10),
+                "stringcol": [chr(100 + i) for i in range(10)],
+            }
+        )
+        df.loc[np.random.default_rng(2).random(len(df)) > 0.5, "dates2"] = pd.NaT
+        left_f = getattr(operator, left)
+        right_f = getattr(operator, right)
+
+        # no nats
+        if left in ["eq", "ne"]:
+            expected = left_f(df, pd.Timestamp("20010109"))
+            result = right_f(pd.Timestamp("20010109"), df)
+            tm.assert_frame_equal(result, expected)
+        else:
+            msg = (
+                "'(<|>)=?' not supported between "
+                "instances of 'numpy.ndarray' and 'Timestamp'"
+            )
+            with pytest.raises(TypeError, match=msg):
+                left_f(df, pd.Timestamp("20010109"))
+            with pytest.raises(TypeError, match=msg):
+                right_f(pd.Timestamp("20010109"), df)
+        # nats
+        if left in ["eq", "ne"]:
+            expected = left_f(df, pd.Timestamp("nat"))
+            result = right_f(pd.Timestamp("nat"), df)
+            tm.assert_frame_equal(result, expected)
+        else:
+            msg = (
+                "'(<|>)=?' not supported between "
+                "instances of 'numpy.ndarray' and 'NaTType'"
+            )
+            with pytest.raises(TypeError, match=msg):
+                left_f(df, pd.Timestamp("nat"))
+            with pytest.raises(TypeError, match=msg):
+                right_f(pd.Timestamp("nat"), df)
+
+    @pytest.mark.xfail(
+        using_pyarrow_string_dtype(), reason="can't compare string and int"
+    )
+    def test_mixed_comparison(self):
+        # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
+        # not raise TypeError
+        # (this appears to be fixed before GH#22163, not sure when)
+        df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
+        other = DataFrame([["a", "b"], ["c", "d"]])
+
+        result = df == other
+        assert not result.any().any()
+
+        result = df != other
+        assert result.all().all()
+
+    def test_df_boolean_comparison_error(self):
+        # GH#4576, GH#22880
+        # comparing DataFrame against list/tuple with len(obj) matching
+        # len(df.columns) is supported as of GH#22800
+        df = DataFrame(np.arange(6).reshape((3, 2)))
+
+        expected = DataFrame([[False, False], [True, False], [False, False]])
+
+        result = df == (2, 2)
+        tm.assert_frame_equal(result, expected)
+
+        result = df == [2, 2]
+        tm.assert_frame_equal(result, expected)
+
+    def test_df_float_none_comparison(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((8, 3)),
+            index=range(8),
+            columns=["A", "B", "C"],
+        )
+
+        result = df.__eq__(None)
+        assert not result.any().any()
+
+    def test_df_string_comparison(self):
+        df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
+        mask_a = df.a > 1
+        tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
+        tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
+
+        mask_b = df.b == "foo"
+        tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
+        tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
+
+
+class TestFrameFlexComparisons:
+    # TODO: test_bool_flex_frame needs a better
name + @pytest.mark.parametrize("op", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_bool_flex_frame(self, op): + data = np.random.default_rng(2).standard_normal((5, 3)) + other_data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + other = DataFrame(other_data) + ndim_5 = np.ones(df.shape + (1, 3)) + + # DataFrame + assert df.eq(df).values.all() + assert not df.ne(df).values.any() + f = getattr(df, op) + o = getattr(operator, op) + # No NAs + tm.assert_frame_equal(f(other), o(df, other)) + # Unaligned + part_o = other.loc[3:, 1:].copy() + rs = f(part_o) + xp = o(df, part_o.reindex(index=df.index, columns=df.columns)) + tm.assert_frame_equal(rs, xp) + # ndarray + tm.assert_frame_equal(f(other.values), o(df, other.values)) + # scalar + tm.assert_frame_equal(f(0), o(df, 0)) + # NAs + msg = "Unable to coerce to Series/DataFrame" + tm.assert_frame_equal(f(np.nan), o(df, np.nan)) + with pytest.raises(ValueError, match=msg): + f(ndim_5) + + @pytest.mark.parametrize("box", [np.array, Series]) + def test_bool_flex_series(self, box): + # Series + # list/tuple + data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + idx_ser = box(np.random.default_rng(2).standard_normal(5)) + col_ser = box(np.random.default_rng(2).standard_normal(3)) + + idx_eq = df.eq(idx_ser, axis=0) + col_eq = df.eq(col_ser) + idx_ne = df.ne(idx_ser, axis=0) + col_ne = df.ne(col_ser) + tm.assert_frame_equal(col_eq, df == Series(col_ser)) + tm.assert_frame_equal(col_eq, -col_ne) + tm.assert_frame_equal(idx_eq, -idx_ne) + tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) + tm.assert_frame_equal(col_eq, df.eq(list(col_ser))) + tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0)) + tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0)) + + idx_gt = df.gt(idx_ser, axis=0) + col_gt = df.gt(col_ser) + idx_le = df.le(idx_ser, axis=0) + col_le = df.le(col_ser) + + tm.assert_frame_equal(col_gt, df > Series(col_ser)) + tm.assert_frame_equal(col_gt, -col_le) + tm.assert_frame_equal(idx_gt, -idx_le) + tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) + + idx_ge = df.ge(idx_ser, axis=0) + col_ge = df.ge(col_ser) + idx_lt = df.lt(idx_ser, axis=0) + col_lt = df.lt(col_ser) + tm.assert_frame_equal(col_ge, df >= Series(col_ser)) + tm.assert_frame_equal(col_ge, -col_lt) + tm.assert_frame_equal(idx_ge, -idx_lt) + tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) + + idx_ser = Series(np.random.default_rng(2).standard_normal(5)) + col_ser = Series(np.random.default_rng(2).standard_normal(3)) + + def test_bool_flex_frame_na(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + # NA + df.loc[0, 0] = np.nan + rs = df.eq(df) + assert not rs.loc[0, 0] + rs = df.ne(df) + assert rs.loc[0, 0] + rs = df.gt(df) + assert not rs.loc[0, 0] + rs = df.lt(df) + assert not rs.loc[0, 0] + rs = df.ge(df) + assert not rs.loc[0, 0] + rs = df.le(df) + assert not rs.loc[0, 0] + + def test_bool_flex_frame_complex_dtype(self): + # complex + arr = np.array([np.nan, 1, 6, np.nan]) + arr2 = np.array([2j, np.nan, 7, None]) + df = DataFrame({"a": arr}) + df2 = DataFrame({"a": arr2}) + + msg = "|".join( + [ + "'>' not supported between instances of '.*' and 'complex'", + r"unorderable types: .*complex\(\)", # PY35 + ] + ) + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df.gt(df2) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df["a"].gt(df2["a"]) + with pytest.raises(TypeError, 
match=msg): + # Check that we match numpy behavior here + df.values > df2.values + + rs = df.ne(df2) + assert rs.values.all() + + arr3 = np.array([2j, np.nan, None]) + df3 = DataFrame({"a": arr3}) + + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df3.gt(2j) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df3["a"].gt(2j) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df3.values > 2j + + def test_bool_flex_frame_object_dtype(self): + # corner, dtype=object + df1 = DataFrame({"col": ["foo", np.nan, "bar"]}, dtype=object) + df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]}, dtype=object) + result = df1.ne(df2) + exp = DataFrame({"col": [False, True, False]}) + tm.assert_frame_equal(result, exp) + + def test_flex_comparison_nat(self): + # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, + # and _definitely_ not be NaN + df = DataFrame([pd.NaT]) + + result = df == pd.NaT + # result.iloc[0, 0] is a np.bool_ object + assert result.iloc[0, 0].item() is False + + result = df.eq(pd.NaT) + assert result.iloc[0, 0].item() is False + + result = df != pd.NaT + assert result.iloc[0, 0].item() is True + + result = df.ne(pd.NaT) + assert result.iloc[0, 0].item() is True + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_df_flex_cmp_constant_return_types(self, opname): + # GH 15077, non-empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + result = getattr(df, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_df_flex_cmp_constant_return_types_empty(self, opname): + # GH 15077 empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + empty = df.iloc[:0] + result = getattr(empty, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + def test_df_flex_cmp_ea_dtype_with_ndarray_series(self): + ii = pd.IntervalIndex.from_breaks([1, 2, 3]) + df = DataFrame({"A": ii, "B": ii}) + + ser = Series([0, 0]) + res = df.eq(ser, axis=0) + + expected = DataFrame({"A": [False, False], "B": [False, False]}) + tm.assert_frame_equal(res, expected) + + ser2 = Series([1, 2], index=["A", "B"]) + res2 = df.eq(ser2, axis=1) + tm.assert_frame_equal(res2, expected) + + +# ------------------------------------------------------------------- +# Arithmetic + + +class TestFrameFlexArithmetic: + def test_floordiv_axis0(self): + # make sure we df.floordiv(ser, axis=0) matches column-wise result + arr = np.arange(3) + ser = Series(arr) + df = DataFrame({"A": ser, "B": ser}) + + result = df.floordiv(ser, axis=0) + + expected = DataFrame({col: df[col] // ser for col in df.columns}) + + tm.assert_frame_equal(result, expected) + + result2 = df.floordiv(ser.values, axis=0) + tm.assert_frame_equal(result2, expected) + + def test_df_add_td64_columnwise(self): + # GH 22534 Check that column-wise addition broadcasts correctly + dti = pd.date_range("2016-01-01", periods=10) + tdi = pd.timedelta_range("1", periods=10) + tser = Series(tdi) + df = DataFrame({0: dti, 1: tdi}) + + result = df.add(tser, axis=0) + expected = DataFrame({0: dti + tdi, 1: tdi + tdi}) + tm.assert_frame_equal(result, expected) + + def 
test_df_add_flex_filled_mixed_dtypes(self):
+        # GH 19611
+        dti = pd.date_range("2016-01-01", periods=3)
+        ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
+        df = DataFrame({"A": dti, "B": ser})
+        other = DataFrame({"A": ser, "B": ser})
+        fill = pd.Timedelta(days=1).to_timedelta64()
+        result = df.add(other, fill_value=fill)
+
+        expected = DataFrame(
+            {
+                "A": Series(
+                    ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
+                ),
+                "B": ser * 2,
+            }
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_arith_flex_frame(
+        self, all_arithmetic_operators, float_frame, mixed_float_frame
+    ):
+        # one instance of parametrized fixture
+        op = all_arithmetic_operators
+
+        def f(x, y):
+            # r-versions not in operator-stdlib; get op without "r" and invert
+            if op.startswith("__r"):
+                return getattr(operator, op.replace("__r", "__"))(y, x)
+            return getattr(operator, op)(x, y)
+
+        result = getattr(float_frame, op)(2 * float_frame)
+        expected = f(float_frame, 2 * float_frame)
+        tm.assert_frame_equal(result, expected)
+
+        # vs mix float
+        result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
+        expected = f(mixed_float_frame, 2 * mixed_float_frame)
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_float(result, dtype={"C": None})
+
+    @pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
+    def test_arith_flex_frame_mixed(
+        self,
+        op,
+        int_frame,
+        mixed_int_frame,
+        mixed_float_frame,
+        switch_numexpr_min_elements,
+    ):
+        f = getattr(operator, op)
+
+        # vs mix int
+        result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
+        expected = f(mixed_int_frame, 2 + mixed_int_frame)
+
+        # no overflow in the uint
+        dtype = None
+        if op in ["__sub__"]:
+            dtype = {"B": "uint64", "C": None}
+        elif op in ["__add__", "__mul__"]:
+            dtype = {"C": None}
+        if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:
+            # when using numexpr, the casting rules are slightly different:
+            # in the `2 + mixed_int_frame` operation, the int32 column becomes
+            # an int64 column (the dtype is not preserved in operations with a
+            # Python scalar), and the int32/int64 combo then gives an int64 result
+            dtype["A"] = (2 + mixed_int_frame)["A"].dtype
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_int(result, dtype=dtype)
+
+        # vs mix float
+        result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
+        expected = f(mixed_float_frame, 2 * mixed_float_frame)
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_float(result, dtype={"C": None})
+
+        # vs plain int
+        result = getattr(int_frame, op)(2 * int_frame)
+        expected = f(int_frame, 2 * int_frame)
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("dim", range(3, 6))
+    def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame, dim):
+        # one instance of parametrized fixture
+        op = all_arithmetic_operators
+
+        # Check that arrays with dim >= 3 raise
+        arr = np.ones((1,) * dim)
+        msg = "Unable to coerce to Series/DataFrame"
+        with pytest.raises(ValueError, match=msg):
+            getattr(float_frame, op)(arr)
+
+    def test_arith_flex_frame_corner(self, float_frame):
+        const_add = float_frame.add(1)
+        tm.assert_frame_equal(const_add, float_frame + 1)
+
+        # corner cases
+        result = float_frame.add(float_frame[:0])
+        expected = float_frame.sort_index() * np.nan
+        tm.assert_frame_equal(result, expected)
+
+        result = float_frame[:0].add(float_frame)
+        expected = float_frame.sort_index() * np.nan
+        tm.assert_frame_equal(result, expected)
+
+        with pytest.raises(NotImplementedError, match="fill_value"):
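+            # DataFrame flex ops only implement fill_value for frame-with-frame
+            # and scalar operands; combining fill_value with a Series operand
+            # (here the row float_frame.iloc[0]) is unimplemented, so it raises.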
float_frame.add(float_frame.iloc[0], fill_value=3) + + with pytest.raises(NotImplementedError, match="fill_value"): + float_frame.add(float_frame.iloc[0], axis="index", fill_value=3) + + @pytest.mark.parametrize("op", ["add", "sub", "mul", "mod"]) + def test_arith_flex_series_ops(self, simple_frame, op): + # after arithmetic refactor, add truediv here + df = simple_frame + + row = df.xs("a") + col = df["two"] + f = getattr(df, op) + op = getattr(operator, op) + tm.assert_frame_equal(f(row), op(df, row)) + tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T) + + def test_arith_flex_series(self, simple_frame): + df = simple_frame + + row = df.xs("a") + col = df["two"] + # special case for some reason + tm.assert_frame_equal(df.add(row, axis=None), df + row) + + # cases which will be refactored after big arithmetic refactor + tm.assert_frame_equal(df.div(row), df / row) + tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T) + + @pytest.mark.parametrize("dtype", ["int64", "float64"]) + def test_arith_flex_series_broadcasting(self, dtype): + # broadcasting issue in GH 7325 + df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype=dtype) + expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]]) + result = df.div(df[0], axis="index") + tm.assert_frame_equal(result, expected) + + def test_arith_flex_zero_len_raises(self): + # GH 19522 passing fill_value to frame flex arith methods should + # raise even in the zero-length special cases + ser_len0 = Series([], dtype=object) + df_len0 = DataFrame(columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + + with pytest.raises(NotImplementedError, match="fill_value"): + df.add(ser_len0, fill_value="E") + + with pytest.raises(NotImplementedError, match="fill_value"): + df_len0.sub(df["A"], axis=None, fill_value=3) + + def test_flex_add_scalar_fill_value(self): + # GH#12723 + dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float") + df = DataFrame({"foo": dat}, index=range(6)) + + exp = df.fillna(0).add(2) + res = df.add(2, fill_value=0) + tm.assert_frame_equal(res, exp) + + def test_sub_alignment_with_duplicate_index(self): + # GH#5185 dup aligning operations should work + df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) + df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) + expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3]) + result = df1.sub(df2) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"]) + def test_arithmetic_with_duplicate_columns(self, op): + # operations + df = DataFrame({"A": np.arange(10), "B": np.random.default_rng(2).random(10)}) + expected = getattr(df, op)(df) + expected.columns = ["A", "A"] + df.columns = ["A", "A"] + result = getattr(df, op)(df) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("level", [0, None]) + def test_broadcast_multiindex(self, level): + # GH34388 + df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]}) + df1.columns = df1.columns.set_names("L1") + + df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]}) + df2.columns = df2.columns.set_names(["L1", "L2"]) + + result = df1.add(df2, level=level) + expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]}) + expected.columns = expected.columns.set_names(["L1", "L2"]) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations(self): + # GH 43321 + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", 
"id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, 3.4], 2020: [3.4, 4.4, 5.4]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_series_index_to_frame_index(self): + # GH 43321 + df = DataFrame( + {2010: [1], 2020: [3]}, + index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]), + ) + + series = Series( + [10.0, 20.0, 30.0], + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + expected = DataFrame( + {2010: [11.0, 21, 31.0], 2020: [13.0, 23.0, 33.0]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_no_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["c"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: np.nan, 2020: np.nan}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "b", 2), + ("a", "c", np.nan), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_part_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, np.nan], 2020: [3.4, 4.4, np.nan]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + +class TestFrameArithmetic: + def test_td64_op_nat_casting(self): + # Make sure we don't accidentally treat timedelta64(NaT) as datetime64 + # when calling dispatch_to_series in DataFrame arithmetic + ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]") + df = DataFrame([[1, 2], [3, 4]]) + + result = df * ser + expected = DataFrame({0: ser, 1: ser}) + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_rowlike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + expected = DataFrame( + [[2, 4], [4, 6], [6, 8]], + columns=df.columns, + index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + rowlike + tm.assert_frame_equal(result, expected) + result = rowlike + df + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_collike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + expected = DataFrame( + [[1, 2], [5, 6], [9, 10]], + columns=df.columns, + 
index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + collike + tm.assert_frame_equal(result, expected) + result = collike + df + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_rowlike_broadcasts( + self, request, all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + exvals = [ + getattr(df.loc["A"], opname)(rowlike.squeeze()), + getattr(df.loc["B"], opname)(rowlike.squeeze()), + getattr(df.loc["C"], opname)(rowlike.squeeze()), + ] + + expected = DataFrame(exvals, columns=df.columns, index=df.index) + + result = getattr(df, opname)(rowlike) + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_collike_broadcasts( + self, request, all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + exvals = { + True: getattr(df[True], opname)(collike.squeeze()), + False: getattr(df[False], opname)(collike.squeeze()), + } + + dtype = None + if opname in ["__rmod__", "__rfloordiv__"]: + # Series ops may return mixed int/float dtypes in cases where + # DataFrame op will return all-float. 
So we upcast `expected` + dtype = np.common_type(*(x.values for x in exvals.values())) + + expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype) + + result = getattr(df, opname)(collike) + tm.assert_frame_equal(result, expected) + + def test_df_bool_mul_int(self): + # GH 22047, GH 22163 multiplication by 1 should result in int dtype, + # not object dtype + df = DataFrame([[False, True], [False, False]]) + result = df * 1 + + # On appveyor this comes back as np.int32 instead of np.int64, + # so we check dtype.kind instead of just dtype + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + result = 1 * df + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + def test_arith_mixed(self): + left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]}) + + result = left + left + expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("col", ["A", "B"]) + def test_arith_getitem_commute(self, all_arithmetic_functions, col): + df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]}) + result = all_arithmetic_functions(df, 1)[col] + expected = all_arithmetic_functions(df[col], 1) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])] + ) + def test_arith_alignment_non_pandas_object(self, values): + # GH#17901 + df = DataFrame({"A": [1, 1], "B": [1, 1]}) + expected = DataFrame({"A": [2, 2], "B": [3, 3]}) + result = df + values + tm.assert_frame_equal(result, expected) + + def test_arith_non_pandas_object(self): + df = DataFrame( + np.arange(1, 10, dtype="f8").reshape(3, 3), + columns=["one", "two", "three"], + index=["a", "b", "c"], + ) + + val1 = df.xs("a").values + added = DataFrame(df.values + val1, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val1, added) + + added = DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val1, axis=0), added) + + val2 = list(df["two"]) + + added = DataFrame(df.values + val2, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val2, added) + + added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val2, axis="index"), added) + + val3 = np.random.default_rng(2).random(df.shape) + added = DataFrame(df.values + val3, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val3), added) + + def test_operations_with_interval_categories_index(self, all_arithmetic_operators): + # GH#27415 + op = all_arithmetic_operators + ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0)) + data = [1, 2] + df = DataFrame([data], columns=ind) + num = 10 + result = getattr(df, op)(num) + expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind) + tm.assert_frame_equal(result, expected) + + def test_frame_with_frame_reindex(self): + # GH#31623 + df = DataFrame( + { + "foo": [pd.Timestamp("2019"), pd.Timestamp("2020")], + "bar": [pd.Timestamp("2018"), pd.Timestamp("2021")], + }, + columns=["foo", "bar"], + dtype="M8[ns]", + ) + df2 = df[["foo"]] + + result = df - df2 + + expected = DataFrame( + {"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]}, + columns=["bar", "foo"], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "value, dtype", + [ + (1, "i8"), + (1.0, "f8"), + (2**63, "f8"), + (1j, "complex128"), + (2**63, "complex128"), + (True, 
"bool"), + (np.timedelta64(20, "ns"), " b + tm.assert_frame_equal(result, expected) + + result = df.values > b + tm.assert_numpy_array_equal(result, expected.values) + + msg1d = "Unable to coerce to Series, length must be 2: given 3" + msg2d = "Unable to coerce to DataFrame, shape must be" + msg2db = "operands could not be broadcast together with shapes" + with pytest.raises(ValueError, match=msg1d): + # wrong shape + df > lst + + with pytest.raises(ValueError, match=msg1d): + # wrong shape + df > tup + + # broadcasts like ndarray (GH#23000) + result = df > b_r + tm.assert_frame_equal(result, expected) + + result = df.values > b_r + tm.assert_numpy_array_equal(result, expected.values) + + with pytest.raises(ValueError, match=msg2d): + df > b_c + + with pytest.raises(ValueError, match=msg2db): + df.values > b_c + + # == + expected = DataFrame([[False, False], [True, False], [False, False]]) + result = df == b + tm.assert_frame_equal(result, expected) + + with pytest.raises(ValueError, match=msg1d): + df == lst + + with pytest.raises(ValueError, match=msg1d): + df == tup + + # broadcasts like ndarray (GH#23000) + result = df == b_r + tm.assert_frame_equal(result, expected) + + result = df.values == b_r + tm.assert_numpy_array_equal(result, expected.values) + + with pytest.raises(ValueError, match=msg2d): + df == b_c + + assert df.values.shape != b_c.shape + + # with alignment + df = DataFrame( + np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc") + ) + expected.index = df.index + expected.columns = df.columns + + with pytest.raises(ValueError, match=msg1d): + df == lst + + with pytest.raises(ValueError, match=msg1d): + df == tup + + def test_inplace_ops_alignment(self): + # inplace ops / ops alignment + # GH 8511 + + columns = list("abcdefg") + X_orig = DataFrame( + np.arange(10 * len(columns)).reshape(-1, len(columns)), + columns=columns, + index=range(10), + ) + Z = 100 * X_orig.iloc[:, 1:-1].copy() + block1 = list("bedcf") + subs = list("bcdef") + + # add + X = X_orig.copy() + result1 = (X[block1] + Z).reindex(columns=subs) + + X[block1] += Z + result2 = X.reindex(columns=subs) + + X = X_orig.copy() + result3 = (X[block1] + Z[block1]).reindex(columns=subs) + + X[block1] += Z[block1] + result4 = X.reindex(columns=subs) + + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + tm.assert_frame_equal(result1, result4) + + # sub + X = X_orig.copy() + result1 = (X[block1] - Z).reindex(columns=subs) + + X[block1] -= Z + result2 = X.reindex(columns=subs) + + X = X_orig.copy() + result3 = (X[block1] - Z[block1]).reindex(columns=subs) + + X[block1] -= Z[block1] + result4 = X.reindex(columns=subs) + + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + tm.assert_frame_equal(result1, result4) + + def test_inplace_ops_identity(self): + # GH 5104 + # make sure that we are actually changing the object + s_orig = Series([1, 2, 3]) + df_orig = DataFrame( + np.random.default_rng(2).integers(0, 5, size=10).reshape(-1, 5) + ) + + # no dtype change + s = s_orig.copy() + s2 = s + s += 1 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1, s) + assert s is s2 + assert s._mgr is s2._mgr + + df = df_orig.copy() + df2 = df + df += 1 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1, df) + assert df is df2 + assert df._mgr is df2._mgr + + # dtype change + s = s_orig.copy() + s2 = s + s += 1.5 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1.5, s) + + df = df_orig.copy() + df2 = df + df 
+= 1.5 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1.5, df) + assert df is df2 + assert df._mgr is df2._mgr + + # mixed dtype + arr = np.random.default_rng(2).integers(0, 10, size=5) + df_orig = DataFrame({"A": arr.copy(), "B": "foo"}) + df = df_orig.copy() + df2 = df + df["A"] += 1 + expected = DataFrame({"A": arr.copy() + 1, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + df = df_orig.copy() + df2 = df + df["A"] += 1.5 + expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + @pytest.mark.parametrize( + "op", + [ + "add", + "and", + pytest.param( + "div", + marks=pytest.mark.xfail( + raises=AttributeError, reason="__idiv__ not implemented" + ), + ), + "floordiv", + "mod", + "mul", + "or", + "pow", + "sub", + "truediv", + "xor", + ], + ) + def test_inplace_ops_identity2(self, op): + df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]}) + + operand = 2 + if op in ("and", "or", "xor"): + # cannot use floats for boolean ops + df["a"] = [True, False, True] + + df_copy = df.copy() + iop = f"__i{op}__" + op = f"__{op}__" + + # no id change and value is correct + getattr(df, iop)(operand) + expected = getattr(df_copy, op)(operand) + tm.assert_frame_equal(df, expected) + expected = id(df) + assert id(df) == expected + + @pytest.mark.parametrize( + "val", + [ + [1, 2, 3], + (1, 2, 3), + np.array([1, 2, 3], dtype=np.int64), + range(1, 4), + ], + ) + def test_alignment_non_pandas(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + + expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index) + tm.assert_frame_equal(align(df, val, axis=0)[1], expected) + + expected = DataFrame( + {"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index + ) + tm.assert_frame_equal(align(df, val, axis=1)[1], expected) + + @pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]) + def test_alignment_non_pandas_length_mismatch(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + # length mismatch + msg = "Unable to coerce to Series, length must be 3: given 2" + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_alignment_non_pandas_index_columns(self): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + tm.assert_frame_equal( + align(df, val, axis=0)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + tm.assert_frame_equal( + align(df, val, axis=1)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + + # shape mismatch + msg = "Unable to coerce to DataFrame, shape must be" + val = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + val = np.zeros((3, 3, 3)) + msg = re.escape( + "Unable to coerce to Series/DataFrame, 
dimension must be <= 2: (3, 3, 3)" + ) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_no_warning(self, all_arithmetic_operators): + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with tm.assert_produces_warning(None): + getattr(df, all_arithmetic_operators)(b) + + def test_dunder_methods_binary(self, all_arithmetic_operators): + # GH#??? frame.__foo__ should only accept one argument + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with pytest.raises(TypeError, match="takes 2 positional arguments"): + getattr(df, all_arithmetic_operators)(b, 0) + + def test_align_int_fill_bug(self): + # GH#910 + X = np.arange(10 * 10, dtype="float64").reshape(10, 10) + Y = np.ones((10, 1), dtype=int) + + df1 = DataFrame(X) + df1["0.X"] = Y.squeeze() + + df2 = df1.astype(float) + + result = df1 - df1.mean() + expected = df2 - df2.mean() + tm.assert_frame_equal(result, expected) + + +def test_pow_with_realignment(): + # GH#32685 pow has special semantics for operating with null values + left = DataFrame({"A": [0, 1, 2]}) + right = DataFrame(index=[0, 1, 2]) + + result = left**right + expected = DataFrame({"A": [np.nan, 1.0, np.nan]}) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_series_extension_dtypes(): + # https://github.com/pandas-dev/pandas/issues/34311 + df = DataFrame( + np.random.default_rng(2).integers(0, 100, (10, 3)), columns=["a", "b", "c"] + ) + ser = Series([1, 2, 3], index=["a", "b", "c"]) + + expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3) + expected = DataFrame(expected, columns=df.columns, dtype="Int64") + + df_ea = df.astype("Int64") + result = df_ea + ser + tm.assert_frame_equal(result, expected) + result = df_ea + ser.astype("Int64") + tm.assert_frame_equal(result, expected) + + +def test_dataframe_blockwise_slicelike(): + # GH#34367 + arr = np.random.default_rng(2).integers(0, 1000, (100, 10)) + df1 = DataFrame(arr) + # Explicit cast to float to avoid implicit cast when setting nan + df2 = df1.copy().astype({1: "float", 3: "float", 7: "float"}) + df2.iloc[0, [1, 3, 7]] = np.nan + + # Explicit cast to float to avoid implicit cast when setting nan + df3 = df1.copy().astype({5: "float"}) + df3.iloc[0, [5]] = np.nan + + # Explicit cast to float to avoid implicit cast when setting nan + df4 = df1.copy().astype({2: "float", 3: "float", 4: "float"}) + df4.iloc[0, np.arange(2, 5)] = np.nan + # Explicit cast to float to avoid implicit cast when setting nan + df5 = df1.copy().astype({4: "float", 5: "float", 6: "float"}) + df5.iloc[0, np.arange(4, 7)] = np.nan + + for left, right in [(df1, df2), (df2, df3), (df4, df5)]: + res = left + right + + expected = DataFrame({i: left[i] + right[i] for i in left.columns}) + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "df, col_dtype", + [ + (DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), + ( + DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")).astype( + {"b": object} + ), + "object", + ), + ], +) +def test_dataframe_operation_with_non_numeric_types(df, col_dtype): + # GH #22663 + expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) + expected = expected.astype({"b": col_dtype}) + result = df + Series([-1.0], index=list("a")) + tm.assert_frame_equal(result, expected) + + +def test_arith_reindex_with_duplicates(): + # https://github.com/pandas-dev/pandas/issues/35194 + df1 = DataFrame(data=[[0]], 
columns=["second"]) + df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) + result = df1 + df2 + expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]]) +def test_arith_list_of_arraylike_raise(to_add): + # GH 36702. Raise when trying to add list of array-like to DataFrame + df = DataFrame({"x": [1, 2], "y": [1, 2]}) + + msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame" + with pytest.raises(ValueError, match=msg): + df + to_add + with pytest.raises(ValueError, match=msg): + to_add + df + + +def test_inplace_arithmetic_series_update(using_copy_on_write, warn_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/36373 + df = DataFrame({"A": [1, 2, 3]}) + df_orig = df.copy() + series = df["A"] + vals = series._values + + with tm.assert_cow_warning(warn_copy_on_write): + series += 1 + if using_copy_on_write: + assert series._values is not vals + tm.assert_frame_equal(df, df_orig) + else: + assert series._values is vals + + expected = DataFrame({"A": [2, 3, 4]}) + tm.assert_frame_equal(df, expected) + + +def test_arithmetic_multiindex_align(): + """ + Regression test for: https://github.com/pandas-dev/pandas/issues/33765 + """ + df1 = DataFrame( + [[1]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + df2 = DataFrame([[1]], index=["a"], columns=Index([0], name="a")) + expected = DataFrame( + [[0]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + result = df1 - df2 + tm.assert_frame_equal(result, expected) + + +def test_bool_frame_mult_float(): + # GH 18549 + df = DataFrame(True, list("ab"), list("cd")) + result = df * 1.0 + expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd")) + tm.assert_frame_equal(result, expected) + + +def test_frame_sub_nullable_int(any_int_ea_dtype): + # GH 32822 + series1 = Series([1, 2, None], dtype=any_int_ea_dtype) + series2 = Series([1, 2, 3], dtype=any_int_ea_dtype) + expected = DataFrame([0, 0, None], dtype=any_int_ea_dtype) + result = series1.to_frame() - series2.to_frame() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) +def test_frame_op_subclass_nonclass_constructor(): + # GH#43201 subclass._constructor is a function, not the subclass itself + + class SubclassedSeries(Series): + @property + def _constructor(self): + return SubclassedSeries + + @property + def _constructor_expanddim(self): + return SubclassedDataFrame + + class SubclassedDataFrame(DataFrame): + _metadata = ["my_extra_data"] + + def __init__(self, my_extra_data, *args, **kwargs) -> None: + self.my_extra_data = my_extra_data + super().__init__(*args, **kwargs) + + @property + def _constructor(self): + return functools.partial(type(self), self.my_extra_data) + + @property + def _constructor_sliced(self): + return SubclassedSeries + + sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]}) + result = sdf * 2 + expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]}) + tm.assert_frame_equal(result, expected) + + result = sdf + sdf + tm.assert_frame_equal(result, expected) + + +def test_enum_column_equality(): + Cols = Enum("Cols", "col1 col2") + + q1 = DataFrame({Cols.col1: [1, 2, 3]}) + q2 = DataFrame({Cols.col1: [1, 2, 3]}) + + result = q1[Cols.col1] == 
q2[Cols.col1] + expected = Series([True, True, True], name=Cols.col1) + + tm.assert_series_equal(result, expected) + + +def test_mixed_col_index_dtype(): + # GH 47382 + df1 = DataFrame(columns=list("abc"), data=1.0, index=[0]) + df2 = DataFrame(columns=list("abc"), data=0.0, index=[0]) + df1.columns = df2.columns.astype("string") + result = df1 + df2 + expected = DataFrame(columns=list("abc"), data=1.0, index=[0]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arrow_interface.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arrow_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..098d1829b973cedab334cb5992b6a74f7d2c7766 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_arrow_interface.py @@ -0,0 +1,45 @@ +import ctypes + +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd + +pa = pytest.importorskip("pyarrow") + + +@td.skip_if_no("pyarrow", min_version="14.0") +def test_dataframe_arrow_interface(): + df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + + capsule = df.__arrow_c_stream__() + assert ( + ctypes.pythonapi.PyCapsule_IsValid( + ctypes.py_object(capsule), b"arrow_array_stream" + ) + == 1 + ) + + table = pa.table(df) + expected = pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + assert table.equals(expected) + + schema = pa.schema([("a", pa.int8()), ("b", pa.string())]) + table = pa.table(df, schema=schema) + expected = expected.cast(schema) + assert table.equals(expected) + + +@td.skip_if_no("pyarrow", min_version="15.0") +def test_dataframe_to_arrow(): + df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + + table = pa.RecordBatchReader.from_stream(df).read_all() + expected = pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + assert table.equals(expected) + + schema = pa.schema([("a", pa.int8()), ("b", pa.string())]) + table = pa.RecordBatchReader.from_stream(df, schema=schema).read_all() + expected = expected.cast(schema) + assert table.equals(expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_block_internals.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_block_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..712494ef15f972044adbf28ca459f942b0c237d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_block_internals.py @@ -0,0 +1,457 @@ +from datetime import ( + datetime, + timedelta, +) +import itertools + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, + Timestamp, + date_range, + option_context, +) +import pandas._testing as tm +from pandas.core.internals.blocks import NumpyBlock + +# Segregated collection of methods that require the BlockManager internal data +# structure + + +# TODO(ArrayManager) check which of those tests need to be rewritten to test the +# equivalent for ArrayManager +pytestmark = td.skip_array_manager_invalid_test + + +class TestDataFrameBlockInternals: + def test_setitem_invalidates_datetime_index_freq(self): + # GH#24096 altering a datetime64tz column inplace invalidates the + # `freq` attribute on the underlying DatetimeIndex + + dti = date_range("20130101", periods=3, tz="US/Eastern") + ts = dti[1] + + df = DataFrame({"B": dti}) + assert df["B"]._values.freq is None + + df.iloc[1, 0] = pd.NaT + assert 
df["B"]._values.freq is None + + # check that the DatetimeIndex was not altered in place + assert dti.freq == "D" + assert dti[1] == ts + + def test_cast_internals(self, float_frame): + msg = "Passing a BlockManager to DataFrame" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + casted = DataFrame(float_frame._mgr, dtype=int) + expected = DataFrame(float_frame._series, dtype=int) + tm.assert_frame_equal(casted, expected) + + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + casted = DataFrame(float_frame._mgr, dtype=np.int32) + expected = DataFrame(float_frame._series, dtype=np.int32) + tm.assert_frame_equal(casted, expected) + + def test_consolidate(self, float_frame): + float_frame["E"] = 7.0 + consolidated = float_frame._consolidate() + assert len(consolidated._mgr.blocks) == 1 + + # Ensure copy, do I want this? + recons = consolidated._consolidate() + assert recons is not consolidated + tm.assert_frame_equal(recons, consolidated) + + float_frame["F"] = 8.0 + assert len(float_frame._mgr.blocks) == 3 + + return_value = float_frame._consolidate_inplace() + assert return_value is None + assert len(float_frame._mgr.blocks) == 1 + + def test_consolidate_inplace(self, float_frame): + # triggers in-place consolidation + for letter in range(ord("A"), ord("Z")): + float_frame[chr(letter)] = chr(letter) + + def test_modify_values(self, float_frame, using_copy_on_write): + if using_copy_on_write: + with pytest.raises(ValueError, match="read-only"): + float_frame.values[5] = 5 + assert (float_frame.values[5] != 5).all() + return + + float_frame.values[5] = 5 + assert (float_frame.values[5] == 5).all() + + # unconsolidated + float_frame["E"] = 7.0 + col = float_frame["E"] + float_frame.values[6] = 6 + # as of 2.0 .values does not consolidate, so subsequent calls to .values + # does not share data + assert not (float_frame.values[6] == 6).all() + + assert (col == 7).all() + + def test_boolean_set_uncons(self, float_frame): + float_frame["E"] = 7.0 + + expected = float_frame.values.copy() + expected[expected > 1] = 2 + + float_frame[float_frame > 1] = 2 + tm.assert_almost_equal(expected, float_frame.values) + + def test_constructor_with_convert(self): + # this is actually mostly a test of lib.maybe_convert_objects + # #2845 + df = DataFrame({"A": [2**63 - 1]}) + result = df["A"] + expected = Series(np.asarray([2**63 - 1], np.int64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2**63]}) + result = df["A"] + expected = Series(np.asarray([2**63], np.uint64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [datetime(2005, 1, 1), True]}) + result = df["A"] + expected = Series( + np.asarray([datetime(2005, 1, 1), True], np.object_), name="A" + ) + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [None, 1]}) + result = df["A"] + expected = Series(np.asarray([np.nan, 1], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0, 2]}) + result = df["A"] + expected = Series(np.asarray([1.0, 2], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, 3]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex128), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, 3.0]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex128), name="A") + 
tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, True]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0, None]}) + result = df["A"] + expected = Series(np.asarray([1.0, np.nan], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, None]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex128), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2.0, 1, True, None]}) + result = df["A"] + expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]}) + result = df["A"] + expected = Series( + np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A" + ) + tm.assert_series_equal(result, expected) + + def test_construction_with_mixed(self, float_string_frame, using_infer_string): + # test construction edge cases with mixed types + + # f7u12, this does not work without extensive workaround + data = [ + [datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], + [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)], + ] + df = DataFrame(data) + + # check dtypes + result = df.dtypes + expected = Series({"datetime64[us]": 3}) + + # mixed-type frames + float_string_frame["datetime"] = datetime.now() + float_string_frame["timedelta"] = timedelta(days=1, seconds=1) + assert float_string_frame["datetime"].dtype == "M8[us]" + assert float_string_frame["timedelta"].dtype == "m8[us]" + result = float_string_frame.dtypes + expected = Series( + [np.dtype("float64")] * 4 + + [ + np.dtype("object") if not using_infer_string else "string", + np.dtype("datetime64[us]"), + np.dtype("timedelta64[us]"), + ], + index=list("ABCD") + ["foo", "datetime", "timedelta"], + ) + tm.assert_series_equal(result, expected) + + def test_construction_with_conversions(self): + # convert from a numpy array of non-ns timedelta64; as of 2.0 this does + # *not* convert + arr = np.array([1, 2, 3], dtype="timedelta64[s]") + df = DataFrame(index=range(3)) + df["A"] = arr + expected = DataFrame( + {"A": pd.timedelta_range("00:00:01", periods=3, freq="s")}, index=range(3) + ) + tm.assert_numpy_array_equal(df["A"].to_numpy(), arr) + + expected = DataFrame( + { + "dt1": Timestamp("20130101"), + "dt2": date_range("20130101", periods=3).astype("M8[s]"), + # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'), + # FIXME: don't leave commented-out + }, + index=range(3), + ) + assert expected.dtypes["dt1"] == "M8[s]" + assert expected.dtypes["dt2"] == "M8[s]" + + df = DataFrame(index=range(3)) + df["dt1"] = np.datetime64("2013-01-01") + df["dt2"] = np.array( + ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" + ) + + # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 + # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]') + # FIXME: don't leave commented-out + + tm.assert_frame_equal(df, expected) + + def test_constructor_compound_dtypes(self): + # GH 5191 + # compound dtypes should raise not-implementederror + + def f(dtype): + data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)) + return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype) + + msg = "compound dtypes are not implemented in the DataFrame constructor" + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), 
("B", "str"), ("C", "int32")]) + + # pre-2.0 these used to work (though results may be unexpected) + with pytest.raises(TypeError, match="argument must be"): + f("int64") + with pytest.raises(TypeError, match="argument must be"): + f("float64") + + # 10822 + msg = "^Unknown datetime string format, unable to parse: aa, at position 0$" + with pytest.raises(ValueError, match=msg): + f("M8[ns]") + + def test_pickle(self, float_string_frame, timezone_frame): + empty_frame = DataFrame() + + unpickled = tm.round_trip_pickle(float_string_frame) + tm.assert_frame_equal(float_string_frame, unpickled) + + # buglet + float_string_frame._mgr.ndim + + # empty + unpickled = tm.round_trip_pickle(empty_frame) + repr(unpickled) + + # tz frame + unpickled = tm.round_trip_pickle(timezone_frame) + tm.assert_frame_equal(timezone_frame, unpickled) + + def test_consolidate_datetime64(self): + # numpy vstack bug + + df = DataFrame( + { + "starting": pd.to_datetime( + [ + "2012-06-21 00:00", + "2012-06-23 07:00", + "2012-06-23 16:30", + "2012-06-25 08:00", + "2012-06-26 12:00", + ] + ), + "ending": pd.to_datetime( + [ + "2012-06-23 07:00", + "2012-06-23 16:30", + "2012-06-25 08:00", + "2012-06-26 12:00", + "2012-06-27 08:00", + ] + ), + "measure": [77, 65, 77, 0, 77], + } + ) + + ser_starting = df.starting + ser_starting.index = ser_starting.values + ser_starting = ser_starting.tz_localize("US/Eastern") + ser_starting = ser_starting.tz_convert("UTC") + ser_starting.index.name = "starting" + + ser_ending = df.ending + ser_ending.index = ser_ending.values + ser_ending = ser_ending.tz_localize("US/Eastern") + ser_ending = ser_ending.tz_convert("UTC") + ser_ending.index.name = "ending" + + df.starting = ser_starting.index + df.ending = ser_ending.index + + tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index) + tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index) + + def test_is_mixed_type(self, float_frame, float_string_frame): + assert not float_frame._is_mixed_type + assert float_string_frame._is_mixed_type + + def test_stale_cached_series_bug_473(self, using_copy_on_write, warn_copy_on_write): + # this is chained, but ok + with option_context("chained_assignment", None): + Y = DataFrame( + np.random.default_rng(2).random((4, 4)), + index=("a", "b", "c", "d"), + columns=("e", "f", "g", "h"), + ) + repr(Y) + Y["e"] = Y["e"].astype("object") + with tm.raises_chained_assignment_error(): + Y["g"]["c"] = np.nan + repr(Y) + Y.sum() + Y["g"].sum() + if using_copy_on_write: + assert not pd.isna(Y["g"]["c"]) + else: + assert pd.isna(Y["g"]["c"]) + + @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning") + def test_strange_column_corruption_issue(self, using_copy_on_write): + # TODO(wesm): Unclear how exactly this is related to internal matters + df = DataFrame(index=[0, 1]) + df[0] = np.nan + wasCol = {} + + with tm.assert_produces_warning( + PerformanceWarning, raise_on_extra_warnings=False + ): + for i, dt in enumerate(df.index): + for col in range(100, 200): + if col not in wasCol: + wasCol[col] = 1 + df[col] = np.nan + if using_copy_on_write: + df.loc[dt, col] = i + else: + df[col][dt] = i + + myid = 100 + + first = len(df.loc[pd.isna(df[myid]), [myid]]) + second = len(df.loc[pd.isna(df[myid]), [myid]]) + assert first == second == 0 + + def test_constructor_no_pandas_array(self): + # Ensure that NumpyExtensionArray isn't allowed inside Series + # See https://github.com/pandas-dev/pandas/issues/23995 for more. 
+ arr = Series([1, 2, 3]).array + result = DataFrame({"A": arr}) + expected = DataFrame({"A": [1, 2, 3]}) + tm.assert_frame_equal(result, expected) + assert isinstance(result._mgr.blocks[0], NumpyBlock) + assert result._mgr.blocks[0].is_numeric + + def test_add_column_with_pandas_array(self): + # GH 26390 + df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df["c"] = pd.arrays.NumpyExtensionArray(np.array([1, 2, None, 3], dtype=object)) + df2 = DataFrame( + { + "a": [1, 2, 3, 4], + "b": ["a", "b", "c", "d"], + "c": pd.arrays.NumpyExtensionArray( + np.array([1, 2, None, 3], dtype=object) + ), + } + ) + assert type(df["c"]._mgr.blocks[0]) == NumpyBlock + assert df["c"]._mgr.blocks[0].is_object + assert type(df2["c"]._mgr.blocks[0]) == NumpyBlock + assert df2["c"]._mgr.blocks[0].is_object + tm.assert_frame_equal(df, df2) + + +def test_update_inplace_sets_valid_block_values(using_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/33457 + df = DataFrame({"a": Series([1, 2, None], dtype="category")}) + + # inplace update of a single column + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(1, inplace=True) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + df["a"].fillna(1, inplace=True) + + # check we haven't put a Series into any block.values + assert isinstance(df._mgr.blocks[0].values, Categorical) + + if not using_copy_on_write: + # smoketest for OP bug from GH#35731 + assert df.isnull().sum().sum() == 0 + + +def test_nonconsolidated_item_cache_take(): + # https://github.com/pandas-dev/pandas/issues/35521 + + # create non-consolidated dataframe with object dtype columns + df = DataFrame() + df["col1"] = Series(["a"], dtype=object) + df["col2"] = Series([0], dtype=object) + + # access column (item cache) + df["col1"] == "A" + # take operation + # (regression was that this consolidated but didn't reset item cache, + # resulting in an invalid cache and the .at operation not working properly) + df[df["col2"] == 0] + + # now setting value should update actual dataframe + df.at[0, "col1"] = "A" + + expected = DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + tm.assert_frame_equal(df, expected) + assert df.at[0, "col1"] == "A" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..cae2f6e81d384149a5d52b1fed7f3c58f2413365 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_constructors.py @@ -0,0 +1,3348 @@ +import array +from collections import ( + OrderedDict, + abc, + defaultdict, + namedtuple, +) +from collections.abc import Iterator +from dataclasses import make_dataclass +from datetime import ( + date, + datetime, + timedelta, +) +import functools +import re + +import numpy as np +from numpy import ma +from numpy.ma import mrecords +import pytest +import pytz + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat.numpy import np_version_gt2 +from pandas.errors import IntCastingNaNError +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, + NumpyEADtype, + PeriodDtype, +) + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + Interval, + MultiIndex, + 
Period, + RangeIndex, + Series, + Timedelta, + Timestamp, + cut, + date_range, + isna, +) +import pandas._testing as tm +from pandas.arrays import ( + DatetimeArray, + IntervalArray, + PeriodArray, + SparseArray, + TimedeltaArray, +) + +MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"] +MIXED_INT_DTYPES = [ + "uint8", + "uint16", + "uint32", + "uint64", + "int8", + "int16", + "int32", + "int64", +] + + +class TestDataFrameConstructors: + def test_constructor_from_ndarray_with_str_dtype(self): + # If we don't ravel/reshape around ensure_str_array, we end up + # with an array of strings each of which is e.g. "[0 1 2]" + arr = np.arange(12).reshape(4, 3) + df = DataFrame(arr, dtype=str) + expected = DataFrame(arr.astype(str), dtype=object) + tm.assert_frame_equal(df, expected) + + def test_constructor_from_2d_datetimearray(self, using_array_manager): + dti = date_range("2016-01-01", periods=6, tz="US/Pacific") + dta = dti._data.reshape(3, 2) + + df = DataFrame(dta) + expected = DataFrame({0: dta[:, 0], 1: dta[:, 1]}) + tm.assert_frame_equal(df, expected) + if not using_array_manager: + # GH#44724 big performance hit if we de-consolidate + assert len(df._mgr.blocks) == 1 + + def test_constructor_dict_with_tzaware_scalar(self): + # GH#42505 + dt = Timestamp("2019-11-03 01:00:00-0700").tz_convert("America/Los_Angeles") + dt = dt.as_unit("ns") + + df = DataFrame({"dt": dt}, index=[0]) + expected = DataFrame({"dt": [dt]}) + tm.assert_frame_equal(df, expected) + + # Non-homogeneous + df = DataFrame({"dt": dt, "value": [1]}) + expected = DataFrame({"dt": [dt], "value": [1]}) + tm.assert_frame_equal(df, expected) + + def test_construct_ndarray_with_nas_and_int_dtype(self): + # GH#26919 match Series by not casting np.nan to meaningless int + arr = np.array([[1, np.nan], [2, 3]]) + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(arr, dtype="i8") + + # check this matches Series behavior + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr[0], dtype="i8", name=0) + + def test_construct_from_list_of_datetimes(self): + df = DataFrame([datetime.now(), datetime.now()]) + assert df[0].dtype == np.dtype("M8[ns]") + + def test_constructor_from_tzaware_datetimeindex(self): + # don't cast a DatetimeIndex WITH a tz, leave as object + # GH#6032 + naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B") + idx = naive.tz_localize("US/Pacific") + + expected = Series(np.array(idx.tolist(), dtype="object"), name="B") + assert expected.dtype == idx.dtype + + # convert index to series + result = Series(idx) + tm.assert_series_equal(result, expected) + + def test_columns_with_leading_underscore_work_with_to_dict(self): + col_underscore = "_b" + df = DataFrame({"a": [1, 2], col_underscore: [3, 4]}) + d = df.to_dict(orient="records") + + ref_d = [{"a": 1, col_underscore: 3}, {"a": 2, col_underscore: 4}] + + assert ref_d == d + + def test_columns_with_leading_number_and_underscore_work_with_to_dict(self): + col_with_num = "1_b" + df = DataFrame({"a": [1, 2], col_with_num: [3, 4]}) + d = df.to_dict(orient="records") + + ref_d = [{"a": 1, col_with_num: 3}, {"a": 2, col_with_num: 4}] + + assert ref_d == d + + def test_array_of_dt64_nat_with_td64dtype_raises(self, frame_or_series): + # GH#39462 + nat = np.datetime64("NaT", "ns") + arr = np.array([nat], dtype=object) + if frame_or_series is DataFrame: + arr = arr.reshape(1, 1) + + msg = "Invalid type for timedelta scalar: " + with pytest.raises(TypeError, match=msg): 
+ frame_or_series(arr, dtype="m8[ns]") + + @pytest.mark.parametrize("kind", ["m", "M"]) + def test_datetimelike_values_with_object_dtype(self, kind, frame_or_series): + # with dtype=object, we should cast dt64 values to Timestamps, not pydatetimes + if kind == "M": + dtype = "M8[ns]" + scalar_type = Timestamp + else: + dtype = "m8[ns]" + scalar_type = Timedelta + + arr = np.arange(6, dtype="i8").view(dtype).reshape(3, 2) + if frame_or_series is Series: + arr = arr[:, 0] + + obj = frame_or_series(arr, dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + # go through a different path in internals.construction + obj = frame_or_series(frame_or_series(arr), dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + obj = frame_or_series(frame_or_series(arr), dtype=NumpyEADtype(object)) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + if frame_or_series is DataFrame: + # other paths through internals.construction + sers = [Series(x) for x in arr] + obj = frame_or_series(sers, dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + def test_series_with_name_not_matching_column(self): + # GH#9232 + x = Series(range(5), name=1) + y = Series(range(5), name=0) + + result = DataFrame(x, columns=[0]) + expected = DataFrame([], columns=[0]) + tm.assert_frame_equal(result, expected) + + result = DataFrame(y, columns=[1]) + expected = DataFrame([], columns=[1]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "constructor", + [ + lambda: DataFrame(), + lambda: DataFrame(None), + lambda: DataFrame(()), + lambda: DataFrame([]), + lambda: DataFrame(_ for _ in []), + lambda: DataFrame(range(0)), + lambda: DataFrame(data=None), + lambda: DataFrame(data=()), + lambda: DataFrame(data=[]), + lambda: DataFrame(data=(_ for _ in [])), + lambda: DataFrame(data=range(0)), + ], + ) + def test_empty_constructor(self, constructor): + expected = DataFrame() + result = constructor() + assert len(result.index) == 0 + assert len(result.columns) == 0 + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "constructor", + [ + lambda: DataFrame({}), + lambda: DataFrame(data={}), + ], + ) + def test_empty_constructor_object_index(self, constructor): + expected = DataFrame(index=RangeIndex(0), columns=RangeIndex(0)) + result = constructor() + assert len(result.index) == 0 + assert len(result.columns) == 0 + tm.assert_frame_equal(result, expected, check_index_type=True) + + @pytest.mark.parametrize( + "emptylike,expected_index,expected_columns", + [ + ([[]], RangeIndex(1), RangeIndex(0)), + ([[], []], RangeIndex(2), RangeIndex(0)), + ([(_ for _ in [])], RangeIndex(1), RangeIndex(0)), + ], + ) + def test_emptylike_constructor(self, emptylike, expected_index, expected_columns): + expected = DataFrame(index=expected_index, columns=expected_columns) + result = DataFrame(emptylike) + tm.assert_frame_equal(result, expected) + + def test_constructor_mixed(self, float_string_frame, using_infer_string): + dtype = "string" if using_infer_string else np.object_ + assert float_string_frame["foo"].dtype == dtype + + def test_constructor_cast_failure(self): + # as of 2.0, we raise if we can't respect "dtype", previously we + # silently ignored + msg = "could not convert string to float" + with pytest.raises(ValueError, match=msg): + 
DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) + + # GH 3010, constructing with odd arrays + df = DataFrame(np.ones((4, 2))) + + # this is ok + df["foo"] = np.ones((4, 2)).tolist() + + # this is not ok + msg = "Expected a 1D array, got an array with shape \\(4, 2\\)" + with pytest.raises(ValueError, match=msg): + df["test"] = np.ones((4, 2)) + + # this is ok + df["foo2"] = np.ones((4, 2)).tolist() + + def test_constructor_dtype_copy(self): + orig_df = DataFrame({"col1": [1.0], "col2": [2.0], "col3": [3.0]}) + + new_df = DataFrame(orig_df, dtype=float, copy=True) + + new_df["col1"] = 200.0 + assert orig_df["col1"][0] == 1.0 + + def test_constructor_dtype_nocast_view_dataframe( + self, using_copy_on_write, warn_copy_on_write + ): + df = DataFrame([[1, 2]]) + should_be_view = DataFrame(df, dtype=df[0].dtype) + if using_copy_on_write: + should_be_view.iloc[0, 0] = 99 + assert df.values[0, 0] == 1 + else: + with tm.assert_cow_warning(warn_copy_on_write): + should_be_view.iloc[0, 0] = 99 + assert df.values[0, 0] == 99 + + def test_constructor_dtype_nocast_view_2d_array( + self, using_array_manager, using_copy_on_write, warn_copy_on_write + ): + df = DataFrame([[1, 2], [3, 4]], dtype="int64") + if not using_array_manager and not using_copy_on_write: + should_be_view = DataFrame(df.values, dtype=df[0].dtype) + # TODO(CoW-warn) this should warn + # with tm.assert_cow_warning(warn_copy_on_write): + should_be_view.iloc[0, 0] = 97 + assert df.values[0, 0] == 97 + else: + # INFO(ArrayManager) DataFrame(ndarray) doesn't necessarily preserve + # a view on the array to ensure contiguous 1D arrays + df2 = DataFrame(df.values, dtype=df[0].dtype) + assert df2._mgr.arrays[0].flags.c_contiguous + + @td.skip_array_manager_invalid_test + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="conversion copies") + def test_1d_object_array_does_not_copy(self): + # https://github.com/pandas-dev/pandas/issues/39272 + arr = np.array(["a", "b"], dtype="object") + df = DataFrame(arr, copy=False) + assert np.shares_memory(df.values, arr) + + @td.skip_array_manager_invalid_test + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="conversion copies") + def test_2d_object_array_does_not_copy(self): + # https://github.com/pandas-dev/pandas/issues/39272 + arr = np.array([["a", "b"], ["c", "d"]], dtype="object") + df = DataFrame(arr, copy=False) + assert np.shares_memory(df.values, arr) + + def test_constructor_dtype_list_data(self): + df = DataFrame([[1, "2"], [None, "a"]], dtype=object) + assert df.loc[1, 0] is None + assert df.loc[0, 1] == "2" + + def test_constructor_list_of_2d_raises(self): + # https://github.com/pandas-dev/pandas/issues/32289 + a = DataFrame() + b = np.empty((0, 0)) + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + DataFrame([a]) + + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + DataFrame([b]) + + a = DataFrame({"A": [1, 2]}) + with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"): + DataFrame([a, a]) + + @pytest.mark.parametrize( + "typ, ad", + [ + # mixed floating and integer coexist in the same frame + ["float", {}], + # add lots of types + ["float", {"A": 1, "B": "foo", "C": "bar"}], + # GH 622 + ["int", {}], + ], + ) + def test_constructor_mixed_dtypes(self, typ, ad): + if typ == "int": + dtypes = MIXED_INT_DTYPES + arrays = [ + np.array(np.random.default_rng(2).random(10), dtype=d) for d in dtypes + ] + elif typ == "float": + dtypes = MIXED_FLOAT_DTYPES + arrays = [ + np.array(np.random.default_rng(2).integers(10, size=10), dtype=d) + for d in dtypes 
+ ] + + for d, a in zip(dtypes, arrays): + assert a.dtype == d + ad.update(dict(zip(dtypes, arrays))) + df = DataFrame(ad) + + dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES + for d in dtypes: + if d in df: + assert df.dtypes[d] == d + + def test_constructor_complex_dtypes(self): + # GH10952 + a = np.random.default_rng(2).random(10).astype(np.complex64) + b = np.random.default_rng(2).random(10).astype(np.complex128) + + df = DataFrame({"a": a, "b": b}) + assert a.dtype == df.a.dtype + assert b.dtype == df.b.dtype + + def test_constructor_dtype_str_na_values(self, string_dtype): + # https://github.com/pandas-dev/pandas/issues/21083 + df = DataFrame({"A": ["x", None]}, dtype=string_dtype) + result = df.isna() + expected = DataFrame({"A": [False, True]}) + tm.assert_frame_equal(result, expected) + assert df.iloc[1, 0] is None + + df = DataFrame({"A": ["x", np.nan]}, dtype=string_dtype) + assert np.isnan(df.iloc[1, 0]) + + def test_constructor_rec(self, float_frame): + rec = float_frame.to_records(index=False) + rec.dtype.names = list(rec.dtype.names)[::-1] + + index = float_frame.index + + df = DataFrame(rec) + tm.assert_index_equal(df.columns, Index(rec.dtype.names)) + + df2 = DataFrame(rec, index=index) + tm.assert_index_equal(df2.columns, Index(rec.dtype.names)) + tm.assert_index_equal(df2.index, index) + + # case with columns != the ones we would infer from the data + rng = np.arange(len(rec))[::-1] + df3 = DataFrame(rec, index=rng, columns=["C", "B"]) + expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"]) + tm.assert_frame_equal(df3, expected) + + def test_constructor_bool(self): + df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)}) + assert df.values.dtype == np.bool_ + + def test_constructor_overflow_int64(self): + # see gh-14881 + values = np.array([2**64 - i for i in range(1, 10)], dtype=np.uint64) + + result = DataFrame({"a": values}) + assert result["a"].dtype == np.uint64 + + # see gh-2355 + data_scores = [ + (6311132704823138710, 273), + (2685045978526272070, 23), + (8921811264899370420, 45), + (17019687244989530680, 270), + (9930107427299601010, 273), + ] + dtype = [("uid", "u8"), ("score", "u8")] + data = np.zeros((len(data_scores),), dtype=dtype) + data[:] = data_scores + df_crawls = DataFrame(data) + assert df_crawls["uid"].dtype == np.uint64 + + @pytest.mark.parametrize( + "values", + [ + np.array([2**64], dtype=object), + np.array([2**65]), + [2**64 + 1], + np.array([-(2**63) - 4], dtype=object), + np.array([-(2**64) - 1]), + [-(2**65) - 2], + ], + ) + def test_constructor_int_overflow(self, values): + # see gh-18584 + value = values[0] + result = DataFrame(values) + + assert result[0].dtype == object + assert result[0][0] == value + + @pytest.mark.parametrize( + "values", + [ + np.array([1], dtype=np.uint16), + np.array([1], dtype=np.uint32), + np.array([1], dtype=np.uint64), + [np.uint16(1)], + [np.uint32(1)], + [np.uint64(1)], + ], + ) + def test_constructor_numpy_uints(self, values): + # GH#47294 + value = values[0] + result = DataFrame(values) + + assert result[0].dtype == value.dtype + assert result[0][0] == value + + def test_constructor_ordereddict(self): + nitems = 100 + nums = list(range(nitems)) + np.random.default_rng(2).shuffle(nums) + expected = [f"A{i:d}" for i in nums] + df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems))) + assert expected == list(df.columns) + + def test_constructor_dict(self): + datetime_series = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + # test 
expects index shifted by 5 + datetime_series_short = datetime_series[5:] + + frame = DataFrame({"col1": datetime_series, "col2": datetime_series_short}) + + # col2 is padded with NaN + assert len(datetime_series) == 30 + assert len(datetime_series_short) == 25 + + tm.assert_series_equal(frame["col1"], datetime_series.rename("col1")) + + exp = Series( + np.concatenate([[np.nan] * 5, datetime_series_short.values]), + index=datetime_series.index, + name="col2", + ) + tm.assert_series_equal(exp, frame["col2"]) + + frame = DataFrame( + {"col1": datetime_series, "col2": datetime_series_short}, + columns=["col2", "col3", "col4"], + ) + + assert len(frame) == len(datetime_series_short) + assert "col1" not in frame + assert isna(frame["col3"]).all() + + # Corner cases + assert len(DataFrame()) == 0 + + # mix dict and array, wrong size - no spec for which error should raise + # first + msg = "Mixing dicts with non-Series may lead to ambiguous ordering." + with pytest.raises(ValueError, match=msg): + DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) + + def test_constructor_dict_length1(self): + # Length-one dict micro-optimization + frame = DataFrame({"A": {"1": 1, "2": 2}}) + tm.assert_index_equal(frame.index, Index(["1", "2"])) + + def test_constructor_dict_with_index(self): + # empty dict plus index + idx = Index([0, 1, 2]) + frame = DataFrame({}, index=idx) + assert frame.index is idx + + def test_constructor_dict_with_index_and_columns(self): + # empty dict with index and columns + idx = Index([0, 1, 2]) + frame = DataFrame({}, index=idx, columns=idx) + assert frame.index is idx + assert frame.columns is idx + assert len(frame._series) == 3 + + def test_constructor_dict_of_empty_lists(self): + # with dict of empty list and Series + frame = DataFrame({"A": [], "B": []}, columns=["A", "B"]) + tm.assert_index_equal(frame.index, RangeIndex(0), exact=True) + + def test_constructor_dict_with_none(self): + # GH 14381 + # Dict with None value + frame_none = DataFrame({"a": None}, index=[0]) + frame_none_list = DataFrame({"a": [None]}, index=[0]) + assert frame_none._get_value(0, "a") is None + assert frame_none_list._get_value(0, "a") is None + tm.assert_frame_equal(frame_none, frame_none_list) + + def test_constructor_dict_errors(self): + # GH10856 + # dict with scalar values should raise error, even if columns passed + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": 0.7}) + + with pytest.raises(ValueError, match=msg): + DataFrame({"a": 0.7}, columns=["a"]) + + @pytest.mark.parametrize("scalar", [2, np.nan, None, "D"]) + def test_constructor_invalid_items_unused(self, scalar): + # No error if invalid (scalar) value is in fact not used: + result = DataFrame({"a": scalar}, columns=["b"]) + expected = DataFrame(columns=["b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) + def test_constructor_dict_nan_key(self, value): + # GH 18455 + cols = [1, value, 3] + idx = ["a", value] + values = [[0, 3], [1, 4], [2, 5]] + data = {cols[c]: Series(values[c], index=idx) for c in range(3)} + result = DataFrame(data).sort_values(1).sort_values("a", axis=1) + expected = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols + ) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx).sort_values("a", axis=1) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx, columns=cols) + 
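+ # (all three construction paths agree because Index lookups match the
+ # missing-value labels, NaN or None, to each other)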
tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("value", [np.nan, None, float("nan")]) + def test_constructor_dict_nan_tuple_key(self, value): + # GH 18455 + cols = Index([(11, 21), (value, 22), (13, value)]) + idx = Index([("a", value), (value, 2)]) + values = [[0, 3], [1, 4], [2, 5]] + data = {cols[c]: Series(values[c], index=idx) for c in range(3)} + result = DataFrame(data).sort_values((11, 21)).sort_values(("a", value), axis=1) + expected = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols + ) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx).sort_values(("a", value), axis=1) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx, columns=cols) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_order_insertion(self): + datetime_series = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + datetime_series_short = datetime_series[:5] + + # GH19018 + # initialization ordering: by insertion order if python>= 3.6 + d = {"b": datetime_series_short, "a": datetime_series} + frame = DataFrame(data=d) + expected = DataFrame(data=d, columns=list("ba")) + tm.assert_frame_equal(frame, expected) + + def test_constructor_dict_nan_key_and_columns(self): + # GH 16894 + result = DataFrame({np.nan: [1, 2], 2: [2, 3]}, columns=[np.nan, 2]) + expected = DataFrame([[1, 2], [2, 3]], columns=[np.nan, 2]) + tm.assert_frame_equal(result, expected) + + def test_constructor_multi_index(self): + # GH 4078 + # construction error with mi and all-nan frame + tuples = [(2, 3), (3, 3), (3, 3)] + mi = MultiIndex.from_tuples(tuples) + df = DataFrame(index=mi, columns=mi) + assert isna(df).values.ravel().all() + + tuples = [(3, 3), (2, 3), (3, 3)] + mi = MultiIndex.from_tuples(tuples) + df = DataFrame(index=mi, columns=mi) + assert isna(df).values.ravel().all() + + def test_constructor_2d_index(self): + # GH 25416 + # handling of 2d index in construction + df = DataFrame([[1]], columns=[[1]], index=[1, 2]) + expected = DataFrame( + [1, 1], + index=Index([1, 2], dtype="int64"), + columns=MultiIndex(levels=[[1]], codes=[[0]]), + ) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[1]], columns=[[1]], index=[[1, 2]]) + expected = DataFrame( + [1, 1], + index=MultiIndex(levels=[[1, 2]], codes=[[0, 1]]), + columns=MultiIndex(levels=[[1]], codes=[[0]]), + ) + tm.assert_frame_equal(df, expected) + + def test_constructor_error_msgs(self): + msg = "Empty data passed with indices specified." + # passing an empty array with columns specified. + with pytest.raises(ValueError, match=msg): + DataFrame(np.empty(0), index=[1]) + + msg = "Mixing dicts with non-Series may lead to ambiguous ordering." 
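+ # For contrast, an illustrative sketch (not part of the upstream test,
+ # and "unambiguous" is a name local to this sketch): swapping the bare
+ # list for a labeled Series removes the ambiguity, so construction
+ # succeeds and aligns on the union of the labels.
+ unambiguous = DataFrame(
+ {"A": {"a": "a", "b": "b"}, "B": Series(["a", "b", "c"], index=list("abc"))}
+ )
+ assert list(unambiguous.index) == ["a", "b", "c"]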
+ # mix dict and array, wrong size + with pytest.raises(ValueError, match=msg): + DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) + + # wrong size ndarray, GH 3105 + msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.arange(12).reshape((4, 3)), + columns=["foo", "bar", "baz"], + index=date_range("2000-01-01", periods=3), + ) + + arr = np.array([[4, 5, 6]]) + msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(4), data=arr) + + arr = np.array([4, 5, 6]) + msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(4), data=arr) + + # higher dim raise exception + with pytest.raises(ValueError, match="Must pass 2-d input"): + DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1]) + + # wrong size axis labels + msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.random.default_rng(2).random((2, 3)), + columns=["A", "B", "C"], + index=[1], + ) + + msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.random.default_rng(2).random((2, 3)), + columns=["A", "B"], + index=[1, 2], + ) + + # gh-26429 + msg = "2 columns passed, passed data had 10 columns" + with pytest.raises(ValueError, match=msg): + DataFrame((range(10), range(10, 20)), columns=("ones", "twos")) + + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": False, "b": True}) + + def test_constructor_subclass_dict(self, dict_subclass): + # Test for passing dict subclass to constructor + data = { + "col1": dict_subclass((x, 10.0 * x) for x in range(10)), + "col2": dict_subclass((x, 20.0 * x) for x in range(10)), + } + df = DataFrame(data) + refdf = DataFrame({col: dict(val.items()) for col, val in data.items()}) + tm.assert_frame_equal(refdf, df) + + data = dict_subclass(data.items()) + df = DataFrame(data) + tm.assert_frame_equal(refdf, df) + + def test_constructor_defaultdict(self, float_frame): + # try with defaultdict + data = {} + float_frame.loc[: float_frame.index[10], "B"] = np.nan + + for k, v in float_frame.items(): + dct = defaultdict(dict) + dct.update(v.to_dict()) + data[k] = dct + frame = DataFrame(data) + expected = frame.reindex(index=float_frame.index) + tm.assert_frame_equal(float_frame, expected) + + def test_constructor_dict_block(self): + expected = np.array([[4.0, 3.0, 2.0, 1.0]]) + df = DataFrame( + {"d": [4.0], "c": [3.0], "b": [2.0], "a": [1.0]}, + columns=["d", "c", "b", "a"], + ) + tm.assert_numpy_array_equal(df.values, expected) + + def test_constructor_dict_cast(self, using_infer_string): + # cast float tests + test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}} + frame = DataFrame(test_data, dtype=float) + assert len(frame) == 3 + assert frame["B"].dtype == np.float64 + assert frame["A"].dtype == np.float64 + + frame = DataFrame(test_data) + assert len(frame) == 3 + assert frame["B"].dtype == np.object_ if not using_infer_string else "string" + assert frame["A"].dtype == np.float64 + + def test_constructor_dict_cast2(self): + # can't cast to float + test_data = { + "A": dict(zip(range(20), [f"word_{i}" for i in range(20)])), + "B": dict(zip(range(15), 
np.random.default_rng(2).standard_normal(15))), + } + with pytest.raises(ValueError, match="could not convert string"): + DataFrame(test_data, dtype=float) + + def test_constructor_dict_dont_upcast(self): + d = {"Col1": {"Row1": "A String", "Row2": np.nan}} + df = DataFrame(d) + assert isinstance(df["Col1"]["Row2"], float) + + def test_constructor_dict_dont_upcast2(self): + dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2]) + assert isinstance(dm[1][1], int) + + def test_constructor_dict_of_tuples(self): + # GH #1491 + data = {"a": (1, 2, 3), "b": (4, 5, 6)} + + result = DataFrame(data) + expected = DataFrame({k: list(v) for k, v in data.items()}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_dict_of_ranges(self): + # GH 26356 + data = {"a": range(3), "b": range(3, 6)} + + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_of_iterators(self): + # GH 26349 + data = {"a": iter(range(3)), "b": reversed(range(3))} + + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_of_generators(self): + # GH 26349 + data = {"a": (i for i in (range(3))), "b": (i for i in reversed(range(3)))} + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_multiindex(self): + d = { + ("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2}, + ("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4}, + ("b", "c"): {("i", "i"): 7, ("i", "j"): 8, ("j", "i"): 9}, + } + _d = sorted(d.items()) + df = DataFrame(d) + expected = DataFrame( + [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) + ).T + expected.index = MultiIndex.from_tuples(expected.index) + tm.assert_frame_equal( + df, + expected, + ) + + d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111} + _d.insert(0, ("z", d["z"])) + expected = DataFrame( + [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False) + ).T + expected.index = Index(expected.index, tupleize_cols=False) + df = DataFrame(d) + df = df.reindex(columns=expected.columns, index=expected.index) + tm.assert_frame_equal(df, expected) + + def test_constructor_dict_datetime64_index(self): + # GH 10160 + dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"] + + def create_data(constructor): + return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)} + + data_datetime64 = create_data(np.datetime64) + data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d")) + data_Timestamp = create_data(Timestamp) + + expected = DataFrame( + [ + {0: 0, 1: None, 2: None, 3: None}, + {0: None, 1: 2, 2: None, 3: None}, + {0: None, 1: None, 2: 4, 3: None}, + {0: None, 1: None, 2: None, 3: 6}, + ], + index=[Timestamp(dt) for dt in dates_as_str], + ) + + result_datetime64 = DataFrame(data_datetime64) + result_datetime = DataFrame(data_datetime) + result_Timestamp = DataFrame(data_Timestamp) + tm.assert_frame_equal(result_datetime64, expected) + tm.assert_frame_equal(result_datetime, expected) + tm.assert_frame_equal(result_Timestamp, expected) + + @pytest.mark.parametrize( + "klass,name", + [ + (lambda x: np.timedelta64(x, "D"), "timedelta64"), + (lambda x: timedelta(days=x), "pytimedelta"), + (lambda x: Timedelta(x, "D"), "Timedelta[ns]"), + (lambda x: Timedelta(x, 
"D").as_unit("s"), "Timedelta[s]"), + ], + ) + def test_constructor_dict_timedelta64_index(self, klass, name): + # GH 10160 + td_as_int = [1, 2, 3, 4] + + data = {i: {klass(s): 2 * i} for i, s in enumerate(td_as_int)} + + expected = DataFrame( + [ + {0: 0, 1: None, 2: None, 3: None}, + {0: None, 1: 2, 2: None, 3: None}, + {0: None, 1: None, 2: 4, 3: None}, + {0: None, 1: None, 2: None, 3: 6}, + ], + index=[Timedelta(td, "D") for td in td_as_int], + ) + + result = DataFrame(data) + + tm.assert_frame_equal(result, expected) + + def test_constructor_period_dict(self): + # PeriodIndex + a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M") + b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D") + df = DataFrame({"a": a, "b": b}) + assert df["a"].dtype == a.dtype + assert df["b"].dtype == b.dtype + + # list of periods + df = DataFrame({"a": a.astype(object).tolist(), "b": b.astype(object).tolist()}) + assert df["a"].dtype == a.dtype + assert df["b"].dtype == b.dtype + + def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): + ea_scalar, ea_dtype = ea_scalar_and_dtype + df = DataFrame({"a": ea_scalar}, index=[0]) + assert df["a"].dtype == ea_dtype + + expected = DataFrame(index=[0], columns=["a"], data=ea_scalar) + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "data,dtype", + [ + (Period("2020-01"), PeriodDtype("M")), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + ], + ) + def test_constructor_extension_scalar_data(self, data, dtype): + # GH 34832 + df = DataFrame(index=[0, 1], columns=["a", "b"], data=data) + + assert df["a"].dtype == dtype + assert df["b"].dtype == dtype + + arr = pd.array([data] * 2, dtype=dtype) + expected = DataFrame({"a": arr, "b": arr}) + + tm.assert_frame_equal(df, expected) + + def test_nested_dict_frame_constructor(self): + rng = pd.period_range("1/1/2000", periods=5) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)), columns=rng) + + data = {} + for col in df.columns: + for row in df.index: + data.setdefault(col, {})[row] = df._get_value(row, col) + + result = DataFrame(data, columns=rng) + tm.assert_frame_equal(result, df) + + data = {} + for col in df.columns: + for row in df.index: + data.setdefault(row, {})[col] = df._get_value(row, col) + + result = DataFrame(data, index=rng).T + tm.assert_frame_equal(result, df) + + def _check_basic_constructor(self, empty): + # mat: 2d matrix with shape (3, 2) to input. 
empty - makes sized + # objects + mat = empty((2, 3), dtype=float) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + + # 1-D input + frame = DataFrame(empty((3,)), columns=["A"], index=[1, 2, 3]) + assert len(frame.index) == 3 + assert len(frame.columns) == 1 + + if empty is not np.ones: + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64) + return + else: + frame = DataFrame( + mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64 + ) + assert frame.values.dtype == np.int64 + + # wrong size axis labels + msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1]) + msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(mat, columns=["A", "B"], index=[1, 2]) + + # higher dim raise exception + with pytest.raises(ValueError, match="Must pass 2-d input"): + DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1]) + + # automatic labeling + frame = DataFrame(mat) + tm.assert_index_equal(frame.index, Index(range(2)), exact=True) + tm.assert_index_equal(frame.columns, Index(range(3)), exact=True) + + frame = DataFrame(mat, index=[1, 2]) + tm.assert_index_equal(frame.columns, Index(range(3)), exact=True) + + frame = DataFrame(mat, columns=["A", "B", "C"]) + tm.assert_index_equal(frame.index, Index(range(2)), exact=True) + + # 0-length axis + frame = DataFrame(empty((0, 3))) + assert len(frame.index) == 0 + + frame = DataFrame(empty((3, 0))) + assert len(frame.columns) == 0 + + def test_constructor_ndarray(self): + self._check_basic_constructor(np.ones) + + frame = DataFrame(["foo", "bar"], index=[0, 1], columns=["A"]) + assert len(frame) == 2 + + def test_constructor_maskedarray(self): + self._check_basic_constructor(ma.masked_all) + + # Check non-masked values + mat = ma.masked_all((2, 3), dtype=float) + mat[0, 0] = 1.0 + mat[1, 2] = 2.0 + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + assert 1.0 == frame["A"][1] + assert 2.0 == frame["C"][2] + + # what is this even checking?? 
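+ # (answer: with every entry masked, each value materializes as NaN, and
+ # since NaN != NaN the elementwise comparison frame == frame is all False)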
+ mat = ma.masked_all((2, 3), dtype=float) + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + assert np.all(~np.asarray(frame == frame)) + + @pytest.mark.filterwarnings( + "ignore:elementwise comparison failed:DeprecationWarning" + ) + def test_constructor_maskedarray_nonfloat(self): + # masked int promoted to float + mat = ma.masked_all((2, 3), dtype=int) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert np.all(~np.asarray(frame == frame)) + + # cast type + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.float64) + assert frame.values.dtype == np.float64 + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = 1 + mat2[1, 2] = 2 + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert 1 == frame["A"][1] + assert 2 == frame["C"][2] + + # masked np.datetime64 stays (use NaT as null) + mat = ma.masked_all((2, 3), dtype="M8[ns]") + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert isna(frame).values.all() + + # cast type + msg = r"datetime64\[ns\] values and dtype=int64 is not supported" + with pytest.raises(TypeError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64) + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = 1 + mat2[1, 2] = 2 + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert 1 == frame["A"].astype("i8")[1] + assert 2 == frame["C"].astype("i8")[2] + + # masked bool promoted to object + mat = ma.masked_all((2, 3), dtype=bool) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert np.all(~np.asarray(frame == frame)) + + # cast type + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=object) + assert frame.values.dtype == object + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = True + mat2[1, 2] = False + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert frame["A"][1] is True + assert frame["C"][2] is False + + def test_constructor_maskedarray_hardened(self): + # Check numpy masked arrays with hard masks -- from GH24574 + mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask() + result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2]) + expected = DataFrame( + {"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, + columns=["A", "B"], + index=[1, 2], + dtype=float, + ) + tm.assert_frame_equal(result, expected) + # Check case where mask is hard but no data are masked + mat_hard = ma.ones((2, 2), dtype=float).harden_mask() + result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2]) + expected = DataFrame( + {"A": [1.0, 1.0], "B": [1.0, 1.0]}, + columns=["A", "B"], + index=[1, 2], + dtype=float, + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_maskedrecarray_dtype(self): + # Ensure constructor honors dtype + data = np.ma.array( + np.ma.zeros(5, dtype=[("date", " None: + self._lst = lst + + def __getitem__(self, n): + return self._lst.__getitem__(n) + + def __len__(self) -> int: + return self._lst.__len__() + + lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])] + columns = ["num", "str"] + result = DataFrame(lst_containers, columns=columns) + expected = DataFrame([[1, "a"], [2, "b"]], columns=columns) + tm.assert_frame_equal(result, expected, 
check_dtype=False) + + def test_constructor_stdlib_array(self): + # GH 4297 + # support Array + result = DataFrame({"A": array.array("i", range(10))}) + expected = DataFrame({"A": list(range(10))}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + expected = DataFrame([list(range(10)), list(range(10))]) + result = DataFrame([array.array("i", range(10)), array.array("i", range(10))]) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_range(self): + # GH26342 + result = DataFrame(range(10)) + expected = DataFrame(list(range(10))) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_ranges(self): + result = DataFrame([range(10), range(10)]) + expected = DataFrame([list(range(10)), list(range(10))]) + tm.assert_frame_equal(result, expected) + + def test_constructor_iterable(self): + # GH 21987 + class Iter: + def __iter__(self) -> Iterator: + for i in range(10): + yield [1, 2, 3] + + expected = DataFrame([[1, 2, 3]] * 10) + result = DataFrame(Iter()) + tm.assert_frame_equal(result, expected) + + def test_constructor_iterator(self): + result = DataFrame(iter(range(10))) + expected = DataFrame(list(range(10))) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_iterators(self): + result = DataFrame([iter(range(10)), iter(range(10))]) + expected = DataFrame([list(range(10)), list(range(10))]) + tm.assert_frame_equal(result, expected) + + def test_constructor_generator(self): + # related #2305 + + gen1 = (i for i in range(10)) + gen2 = (i for i in range(10)) + + expected = DataFrame([list(range(10)), list(range(10))]) + result = DataFrame([gen1, gen2]) + tm.assert_frame_equal(result, expected) + + gen = ([i, "a"] for i in range(10)) + result = DataFrame(gen) + expected = DataFrame({0: range(10), 1: "a"}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_list_of_dicts(self): + result = DataFrame([{}]) + expected = DataFrame(index=RangeIndex(1), columns=[]) + tm.assert_frame_equal(result, expected) + + def test_constructor_ordered_dict_nested_preserve_order(self): + # see gh-18166 + nested1 = OrderedDict([("b", 1), ("a", 2)]) + nested2 = OrderedDict([("b", 2), ("a", 5)]) + data = OrderedDict([("col2", nested1), ("col1", nested2)]) + result = DataFrame(data) + data = {"col2": [1, 2], "col1": [2, 5]} + expected = DataFrame(data=data, index=["b", "a"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dict_type", [dict, OrderedDict]) + def test_constructor_ordered_dict_preserve_order(self, dict_type): + # see gh-13304 + expected = DataFrame([[2, 1]], columns=["b", "a"]) + + data = dict_type() + data["b"] = [2] + data["a"] = [1] + + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + data = dict_type() + data["b"] = 2 + data["a"] = 1 + + result = DataFrame([data]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dict_type", [dict, OrderedDict]) + def test_constructor_ordered_dict_conflicting_orders(self, dict_type): + # the first dict element sets the ordering for the DataFrame, + # even if there are conflicting orders from subsequent ones + row_one = dict_type() + row_one["b"] = 2 + row_one["a"] = 1 + + row_two = dict_type() + row_two["a"] = 1 + row_two["b"] = 2 + + row_three = {"b": 2, "a": 1} + + expected = DataFrame([[2, 1], [2, 1]], columns=["b", "a"]) + result = DataFrame([row_one, row_two]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=["b", "a"]) + 
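+ # (row_two inserts its keys in the opposite order, but the column layout
+ # fixed by row_one still wins for all three rows)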
result = DataFrame([row_one, row_two, row_three]) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_series_aligned_index(self): + series = [Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)] + result = DataFrame(series) + expected = DataFrame( + {"b": [0, 1, 2], "a": [0, 1, 2], "c": [0, 1, 2]}, + columns=["b", "a", "c"], + index=["0", "1", "2"], + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_derived_dicts(self): + class CustomDict(dict): + pass + + d = {"a": 1.5, "b": 3} + + data_custom = [CustomDict(d)] + data = [d] + + result_custom = DataFrame(data_custom) + result = DataFrame(data) + tm.assert_frame_equal(result, result_custom) + + def test_constructor_ragged(self): + data = { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(8), + } + with pytest.raises(ValueError, match="All arrays must be of the same length"): + DataFrame(data) + + def test_constructor_scalar(self): + idx = Index(range(3)) + df = DataFrame({"a": 0}, index=idx) + expected = DataFrame({"a": [0, 0, 0]}, index=idx) + tm.assert_frame_equal(df, expected, check_dtype=False) + + def test_constructor_Series_copy_bug(self, float_frame): + df = DataFrame(float_frame["A"], index=float_frame.index, columns=["A"]) + df.copy() + + def test_constructor_mixed_dict_and_Series(self): + data = {} + data["A"] = {"foo": 1, "bar": 2, "baz": 3} + data["B"] = Series([4, 3, 2, 1], index=["bar", "qux", "baz", "foo"]) + + result = DataFrame(data) + assert result.index.is_monotonic_increasing + + # ordering ambiguous, raise exception + with pytest.raises(ValueError, match="ambiguous ordering"): + DataFrame({"A": ["a", "b"], "B": {"a": "a", "b": "b"}}) + + # this is OK though + result = DataFrame({"A": ["a", "b"], "B": Series(["a", "b"], index=["a", "b"])}) + expected = DataFrame({"A": ["a", "b"], "B": ["a", "b"]}, index=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_constructor_mixed_type_rows(self): + # Issue 25075 + data = [[1, 2], (3, 4)] + result = DataFrame(data) + expected = DataFrame([[1, 2], [3, 4]]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "tuples,lists", + [ + ((), []), + ((()), []), + (((), ()), [(), ()]), + (((), ()), [[], []]), + (([], []), [[], []]), + (([1], [2]), [[1], [2]]), # GH 32776 + (([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]), + ], + ) + def test_constructor_tuple(self, tuples, lists): + # GH 25691 + result = DataFrame(tuples) + expected = DataFrame(lists) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_tuples(self): + result = DataFrame({"A": [(1, 2), (3, 4)]}) + expected = DataFrame({"A": Series([(1, 2), (3, 4)])}) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_namedtuples(self): + # GH11181 + named_tuple = namedtuple("Pandas", list("ab")) + tuples = [named_tuple(1, 3), named_tuple(2, 4)] + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + # with columns + expected = DataFrame({"y": [1, 2], "z": [3, 4]}) + result = DataFrame(tuples, columns=["y", "z"]) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_dataclasses(self): + # GH21910 + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + data = [Point(0, 3), Point(1, 3)] + expected = DataFrame({"x": [0, 1], "y": [3, 3]}) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def 
test_constructor_list_of_dataclasses_with_varying_types(self): + # GH21910 + # varying types + Point = make_dataclass("Point", [("x", int), ("y", int)]) + HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)]) + + data = [Point(0, 3), HLine(1, 3, 3)] + + expected = DataFrame( + {"x": [0, np.nan], "y": [3, 3], "x0": [np.nan, 1], "x1": [np.nan, 3]} + ) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_dataclasses_error_thrown(self): + # GH21910 + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + # expect TypeError + msg = "asdict() should be called on dataclass instances" + with pytest.raises(TypeError, match=re.escape(msg)): + DataFrame([Point(0, 0), {"x": 1, "y": 0}]) + + def test_constructor_list_of_dict_order(self): + # GH10056 + data = [ + {"First": 1, "Second": 4, "Third": 7, "Fourth": 10}, + {"Second": 5, "First": 2, "Fourth": 11, "Third": 8}, + {"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13}, + ] + expected = DataFrame( + { + "First": [1, 2, 3], + "Second": [4, 5, 6], + "Third": [7, 8, 9], + "Fourth": [10, 11, 12], + "YYY": [None, None, 14], + "XXX": [None, None, 13], + } + ) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def test_constructor_Series_named(self): + a = Series([1, 2, 3], index=["a", "b", "c"], name="x") + df = DataFrame(a) + assert df.columns[0] == "x" + tm.assert_index_equal(df.index, a.index) + + # ndarray like + arr = np.random.default_rng(2).standard_normal(10) + s = Series(arr, name="x") + df = DataFrame(s) + expected = DataFrame({"x": s}) + tm.assert_frame_equal(df, expected) + + s = Series(arr, index=range(3, 13)) + df = DataFrame(s) + expected = DataFrame({0: s}) + tm.assert_frame_equal(df, expected) + + msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(s, columns=[1, 2]) + + # #2234 + a = Series([], name="x", dtype=object) + df = DataFrame(a) + assert df.columns[0] == "x" + + # series with name and w/o + s1 = Series(arr, name="x") + df = DataFrame([s1, arr]).T + expected = DataFrame({"x": s1, "Unnamed 0": arr}, columns=["x", "Unnamed 0"]) + tm.assert_frame_equal(df, expected) + + # this is a bit non-intuitive here; the series collapse down to arrays + df = DataFrame([arr, s1]).T + expected = DataFrame({1: s1, 0: arr}, columns=[0, 1]) + tm.assert_frame_equal(df, expected) + + def test_constructor_Series_named_and_columns(self): + # GH 9232 validation + + s0 = Series(range(5), name=0) + s1 = Series(range(5), name=1) + + # matching name and column gives standard frame + tm.assert_frame_equal(DataFrame(s0, columns=[0]), s0.to_frame()) + tm.assert_frame_equal(DataFrame(s1, columns=[1]), s1.to_frame()) + + # non-matching produces empty frame + assert DataFrame(s0, columns=[1]).empty + assert DataFrame(s1, columns=[0]).empty + + def test_constructor_Series_differently_indexed(self): + # name + s1 = Series([1, 2, 3], index=["a", "b", "c"], name="x") + + # no name + s2 = Series([1, 2, 3], index=["a", "b", "c"]) + + other_index = Index(["a", "b"]) + + df1 = DataFrame(s1, index=other_index) + exp1 = DataFrame(s1.reindex(other_index)) + assert df1.columns[0] == "x" + tm.assert_frame_equal(df1, exp1) + + df2 = DataFrame(s2, index=other_index) + exp2 = DataFrame(s2.reindex(other_index)) + assert df2.columns[0] == 0 + tm.assert_index_equal(df2.index, other_index) + tm.assert_frame_equal(df2, exp2) + + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + 
[ + ("idx", "idx", "idx", "idx"), + ("idx", "idx", None, None), + ("idx", None, None, None), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + Index(["a", "b", "c"], name=name_in1), + Index(["b", "c", "d"], name=name_in2), + Index(["c", "d", "e"], name=name_in3), + ] + series = { + c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"]) + } + result = DataFrame(series) + + exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out) + expected = DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + + def test_constructor_manager_resize(self, float_frame): + index = list(float_frame.index[:5]) + columns = list(float_frame.columns[:3]) + + msg = "Passing a BlockManager to DataFrame" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + result = DataFrame(float_frame._mgr, index=index, columns=columns) + tm.assert_index_equal(result.index, Index(index)) + tm.assert_index_equal(result.columns, Index(columns)) + + def test_constructor_mix_series_nonseries(self, float_frame): + df = DataFrame( + {"A": float_frame["A"], "B": list(float_frame["B"])}, columns=["A", "B"] + ) + tm.assert_frame_equal(df, float_frame.loc[:, ["A", "B"]]) + + msg = "does not match index length" + with pytest.raises(ValueError, match=msg): + DataFrame({"A": float_frame["A"], "B": list(float_frame["B"])[:-2]}) + + def test_constructor_miscast_na_int_dtype(self): + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64) + + def test_constructor_column_duplicates(self): + # it works! 
#2079 + df = DataFrame([[8, 5]], columns=["a", "a"]) + edf = DataFrame([[8, 5]]) + edf.columns = ["a", "a"] + + tm.assert_frame_equal(df, edf) + + idf = DataFrame.from_records([(8, 5)], columns=["a", "a"]) + + tm.assert_frame_equal(idf, edf) + + def test_constructor_empty_with_string_dtype(self): + # GH 9428 + expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object) + + df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str) + tm.assert_frame_equal(df, expected) + df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_) + tm.assert_frame_equal(df, expected) + df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5") + tm.assert_frame_equal(df, expected) + + def test_constructor_empty_with_string_extension(self, nullable_string_dtype): + # GH 34915 + expected = DataFrame(columns=["c1"], dtype=nullable_string_dtype) + df = DataFrame(columns=["c1"], dtype=nullable_string_dtype) + tm.assert_frame_equal(df, expected) + + def test_constructor_single_value(self): + # expecting single value upcasting here + df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"]) + tm.assert_frame_equal( + df, DataFrame(np.zeros(df.shape).astype("float64"), df.index, df.columns) + ) + + df = DataFrame(0, index=[1, 2, 3], columns=["a", "b", "c"]) + tm.assert_frame_equal( + df, DataFrame(np.zeros(df.shape).astype("int64"), df.index, df.columns) + ) + + df = DataFrame("a", index=[1, 2], columns=["a", "c"]) + tm.assert_frame_equal( + df, + DataFrame( + np.array([["a", "a"], ["a", "a"]], dtype=object), + index=[1, 2], + columns=["a", "c"], + ), + ) + + msg = "DataFrame constructor not properly called!" + with pytest.raises(ValueError, match=msg): + DataFrame("a", [1, 2]) + with pytest.raises(ValueError, match=msg): + DataFrame("a", columns=["a", "c"]) + + msg = "incompatible data and dtype" + with pytest.raises(TypeError, match=msg): + DataFrame("a", [1, 2], ["a", "c"], float) + + def test_constructor_with_datetimes(self, using_infer_string): + intname = np.dtype(int).name + floatname = np.dtype(np.float64).name + objectname = np.dtype(np.object_).name + + # single item + df = DataFrame( + { + "A": 1, + "B": "foo", + "C": "bar", + "D": Timestamp("20010101"), + "E": datetime(2001, 1, 2, 0, 0), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("int64")] + + [np.dtype(objectname) if not using_infer_string else "string"] * 2 + + [np.dtype("M8[s]"), np.dtype("M8[us]")], + index=list("ABCDE"), + ) + tm.assert_series_equal(result, expected) + + # check with ndarray construction ndim==0 (e.g. 
we are passing a ndim 0 + # ndarray with a dtype specified) + df = DataFrame( + { + "a": 1.0, + "b": 2, + "c": "foo", + floatname: np.array(1.0, dtype=floatname), + intname: np.array(1, dtype=intname), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("float64")] + + [np.dtype("int64")] + + [np.dtype("object") if not using_infer_string else "string"] + + [np.dtype("float64")] + + [np.dtype(intname)], + index=["a", "b", "c", floatname, intname], + ) + tm.assert_series_equal(result, expected) + + # check with ndarray construction ndim>0 + df = DataFrame( + { + "a": 1.0, + "b": 2, + "c": "foo", + floatname: np.array([1.0] * 10, dtype=floatname), + intname: np.array([1] * 10, dtype=intname), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("float64")] + + [np.dtype("int64")] + + [np.dtype("object") if not using_infer_string else "string"] + + [np.dtype("float64")] + + [np.dtype(intname)], + index=["a", "b", "c", floatname, intname], + ) + tm.assert_series_equal(result, expected) + + def test_constructor_with_datetimes1(self): + # GH 2809 + ind = date_range(start="2000-01-01", freq="D", periods=10) + datetimes = [ts.to_pydatetime() for ts in ind] + datetime_s = Series(datetimes) + assert datetime_s.dtype == "M8[ns]" + + def test_constructor_with_datetimes2(self): + # GH 2810 + ind = date_range(start="2000-01-01", freq="D", periods=10) + datetimes = [ts.to_pydatetime() for ts in ind] + dates = [ts.date() for ts in ind] + df = DataFrame(datetimes, columns=["datetimes"]) + df["dates"] = dates + result = df.dtypes + expected = Series( + [np.dtype("datetime64[ns]"), np.dtype("object")], + index=["datetimes", "dates"], + ) + tm.assert_series_equal(result, expected) + + def test_constructor_with_datetimes3(self): + # GH 7594 + # don't coerce tz-aware + tz = pytz.timezone("US/Eastern") + dt = tz.localize(datetime(2012, 1, 1)) + + df = DataFrame({"End Date": dt}, index=[0]) + assert df.iat[0, 0] == dt + tm.assert_series_equal( + df.dtypes, Series({"End Date": "datetime64[us, US/Eastern]"}, dtype=object) + ) + + df = DataFrame([{"End Date": dt}]) + assert df.iat[0, 0] == dt + tm.assert_series_equal( + df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"}, dtype=object) + ) + + def test_constructor_with_datetimes4(self): + # tz-aware (UTC and other tz's) + # GH 8411 + dr = date_range("20130101", periods=3) + df = DataFrame({"value": dr}) + assert df.iat[0, 0].tz is None + dr = date_range("20130101", periods=3, tz="UTC") + df = DataFrame({"value": dr}) + assert str(df.iat[0, 0].tz) == "UTC" + dr = date_range("20130101", periods=3, tz="US/Eastern") + df = DataFrame({"value": dr}) + assert str(df.iat[0, 0].tz) == "US/Eastern" + + def test_constructor_with_datetimes5(self): + # GH 7822 + # preserver an index with a tz on dict construction + i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern") + + expected = DataFrame({"a": i.to_series().reset_index(drop=True)}) + df = DataFrame() + df["a"] = i + tm.assert_frame_equal(df, expected) + + df = DataFrame({"a": i}) + tm.assert_frame_equal(df, expected) + + def test_constructor_with_datetimes6(self): + # multiples + i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern") + i_no_tz = date_range("1/1/2011", periods=5, freq="10s") + df = DataFrame({"a": i, "b": i_no_tz}) + expected = DataFrame({"a": i.to_series().reset_index(drop=True), "b": i_no_tz}) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "arr", + [ + np.array([None, None, None, None, 
datetime.now(), None]), + np.array([None, None, datetime.now(), None]), + [[np.datetime64("NaT")], [None]], + [[np.datetime64("NaT")], [pd.NaT]], + [[None], [np.datetime64("NaT")]], + [[None], [pd.NaT]], + [[pd.NaT], [np.datetime64("NaT")]], + [[pd.NaT], [None]], + ], + ) + def test_constructor_datetimes_with_nulls(self, arr): + # gh-15869, GH#11220 + result = DataFrame(arr).dtypes + expected = Series([np.dtype("datetime64[ns]")]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("order", ["K", "A", "C", "F"]) + @pytest.mark.parametrize( + "unit", + ["M", "D", "h", "m", "s", "ms", "us", "ns"], + ) + def test_constructor_datetimes_non_ns(self, order, unit): + dtype = f"datetime64[{unit}]" + na = np.array( + [ + ["2015-01-01", "2015-01-02", "2015-01-03"], + ["2017-01-01", "2017-01-02", "2017-02-03"], + ], + dtype=dtype, + order=order, + ) + df = DataFrame(na) + expected = DataFrame(na.astype("M8[ns]")) + if unit in ["M", "D", "h", "m"]: + with pytest.raises(TypeError, match="Cannot cast"): + expected.astype(dtype) + + # instead the constructor casts to the closest supported reso, i.e. "s" + expected = expected.astype("datetime64[s]") + else: + expected = expected.astype(dtype=dtype) + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("order", ["K", "A", "C", "F"]) + @pytest.mark.parametrize( + "unit", + [ + "D", + "h", + "m", + "s", + "ms", + "us", + "ns", + ], + ) + def test_constructor_timedelta_non_ns(self, order, unit): + dtype = f"timedelta64[{unit}]" + na = np.array( + [ + [np.timedelta64(1, "D"), np.timedelta64(2, "D")], + [np.timedelta64(4, "D"), np.timedelta64(5, "D")], + ], + dtype=dtype, + order=order, + ) + df = DataFrame(na) + if unit in ["D", "h", "m"]: + # we get the nearest supported unit, i.e. "s" + exp_unit = "s" + else: + exp_unit = unit + exp_dtype = np.dtype(f"m8[{exp_unit}]") + expected = DataFrame( + [ + [Timedelta(1, "D"), Timedelta(2, "D")], + [Timedelta(4, "D"), Timedelta(5, "D")], + ], + dtype=exp_dtype, + ) + # TODO(2.0): ideally we should get the same 'expected' without passing + # dtype=exp_dtype. + tm.assert_frame_equal(df, expected) + + def test_constructor_for_list_with_dtypes(self, using_infer_string): + # test list of lists/ndarrays + df = DataFrame([np.arange(5) for x in range(5)]) + result = df.dtypes + expected = Series([np.dtype("int")] * 5) + tm.assert_series_equal(result, expected) + + df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)]) + result = df.dtypes + expected = Series([np.dtype("int32")] * 5) + tm.assert_series_equal(result, expected) + + # overflow issue? 
(we always expected int64 upcasting here) + df = DataFrame({"a": [2**31, 2**31 + 1]}) + assert df.dtypes.iloc[0] == np.dtype("int64") + + # GH #2751 (construction with no index specified), make sure we cast to + # platform values + df = DataFrame([1, 2]) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame([1.0, 2.0]) + assert df.dtypes.iloc[0] == np.dtype("float64") + + df = DataFrame({"a": [1, 2]}) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame({"a": [1.0, 2.0]}) + assert df.dtypes.iloc[0] == np.dtype("float64") + + df = DataFrame({"a": 1}, index=range(3)) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame({"a": 1.0}, index=range(3)) + assert df.dtypes.iloc[0] == np.dtype("float64") + + # with object list + df = DataFrame( + { + "a": [1, 2, 4, 7], + "b": [1.2, 2.3, 5.1, 6.3], + "c": list("abcd"), + "d": [datetime(2000, 1, 1) for i in range(4)], + "e": [1.0, 2, 4.0, 7], + } + ) + result = df.dtypes + expected = Series( + [ + np.dtype("int64"), + np.dtype("float64"), + np.dtype("object") if not using_infer_string else "string", + np.dtype("datetime64[ns]"), + np.dtype("float64"), + ], + index=list("abcde"), + ) + tm.assert_series_equal(result, expected) + + def test_constructor_frame_copy(self, float_frame): + cop = DataFrame(float_frame, copy=True) + cop["A"] = 5 + assert (cop["A"] == 5).all() + assert not (float_frame["A"] == 5).all() + + def test_constructor_frame_shallow_copy(self, float_frame): + # constructing a DataFrame from DataFrame with copy=False should still + # give a "shallow" copy (share data, not attributes) + # https://github.com/pandas-dev/pandas/issues/49523 + orig = float_frame.copy() + cop = DataFrame(float_frame) + assert cop._mgr is not float_frame._mgr + # Overwriting index of copy doesn't change original + cop.index = np.arange(len(cop)) + tm.assert_frame_equal(float_frame, orig) + + def test_constructor_ndarray_copy( + self, float_frame, using_array_manager, using_copy_on_write + ): + if not using_array_manager: + arr = float_frame.values.copy() + df = DataFrame(arr) + + arr[5] = 5 + if using_copy_on_write: + assert not (df.values[5] == 5).all() + else: + assert (df.values[5] == 5).all() + + df = DataFrame(arr, copy=True) + arr[6] = 6 + assert not (df.values[6] == 6).all() + else: + arr = float_frame.values.copy() + # default: copy to ensure contiguous arrays + df = DataFrame(arr) + assert df._mgr.arrays[0].flags.c_contiguous + arr[0, 0] = 100 + assert df.iloc[0, 0] != 100 + + # manually specify copy=False + df = DataFrame(arr, copy=False) + assert not df._mgr.arrays[0].flags.c_contiguous + arr[0, 0] = 1000 + assert df.iloc[0, 0] == 1000 + + def test_constructor_series_copy(self, float_frame): + series = float_frame._series + + df = DataFrame({"A": series["A"]}, copy=True) + # TODO can be replaced with `df.loc[:, "A"] = 5` after deprecation about + # inplace mutation is enforced + df.loc[df.index[0] : df.index[-1], "A"] = 5 + + assert not (series["A"] == 5).all() + + @pytest.mark.parametrize( + "df", + [ + DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan]), + DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan]), + DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]), + DataFrame( + [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan] + ), + DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2]), + ], + ) + def test_constructor_with_nas(self, df): + # GH 5016 + # na's in indices + # GH 21428 (non-unique columns) + + for i in range(len(df.columns)): + 
df.iloc[:, i] + + indexer = np.arange(len(df.columns))[isna(df.columns)] + + # No NaN found -> error + if len(indexer) == 0: + with pytest.raises(KeyError, match="^nan$"): + df.loc[:, np.nan] + # single nan should result in Series + elif len(indexer) == 1: + tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan]) + # multiple nans should result in DataFrame + else: + tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan]) + + def test_constructor_lists_to_object_dtype(self): + # from #1074 + d = DataFrame({"a": [np.nan, False]}) + assert d["a"].dtype == np.object_ + assert not d["a"][1] + + def test_constructor_ndarray_categorical_dtype(self): + cat = Categorical(["A", "B", "C"]) + arr = np.array(cat).reshape(-1, 1) + arr = np.broadcast_to(arr, (3, 4)) + + result = DataFrame(arr, dtype=cat.dtype) + + expected = DataFrame({0: cat, 1: cat, 2: cat, 3: cat}) + tm.assert_frame_equal(result, expected) + + def test_constructor_categorical(self): + # GH8626 + + # dict creation + df = DataFrame({"A": list("abc")}, dtype="category") + expected = Series(list("abc"), dtype="category", name="A") + tm.assert_series_equal(df["A"], expected) + + # to_frame + s = Series(list("abc"), dtype="category") + result = s.to_frame() + expected = Series(list("abc"), dtype="category", name=0) + tm.assert_series_equal(result[0], expected) + result = s.to_frame(name="foo") + expected = Series(list("abc"), dtype="category", name="foo") + tm.assert_series_equal(result["foo"], expected) + + # list-like creation + df = DataFrame(list("abc"), dtype="category") + expected = Series(list("abc"), dtype="category", name=0) + tm.assert_series_equal(df[0], expected) + + def test_construct_from_1item_list_of_categorical(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + # ndim != 1 + cat = Categorical(list("abc")) + df = DataFrame([cat]) + expected = DataFrame([cat.astype(object)]) + tm.assert_frame_equal(df, expected) + + def test_construct_from_list_of_categoricals(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + + df = DataFrame([Categorical(list("abc")), Categorical(list("abd"))]) + expected = DataFrame([["a", "b", "c"], ["a", "b", "d"]]) + tm.assert_frame_equal(df, expected) + + def test_from_nested_listlike_mixed_types(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + # mixed + df = DataFrame([Categorical(list("abc")), list("def")]) + expected = DataFrame([["a", "b", "c"], ["d", "e", "f"]]) + tm.assert_frame_equal(df, expected) + + def test_construct_from_listlikes_mismatched_lengths(self): + df = DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))]) + expected = DataFrame([list("abc"), list("abdefg")]) + tm.assert_frame_equal(df, expected) + + def test_constructor_categorical_series(self): + items = [1, 2, 3, 1] + exp = Series(items).astype("category") + res = Series(items, dtype="category") + tm.assert_series_equal(res, exp) + + items = ["a", "b", "c", "a"] + exp = Series(items).astype("category") + res = Series(items, dtype="category") + tm.assert_series_equal(res, exp) + + # insert into frame with different index + # GH 8076 + index = date_range("20000101", periods=3) + expected = Series( + Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"]) + ) + expected.index = index + + expected = DataFrame({"x": expected}) + df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index) + 
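# Note (added): the categorical Series carries a default RangeIndex, so
+ # aligning it to the DatetimeIndex leaves no matches -- the values become
+ # NaN while the categories ["a", "b", "c"] survive in the dtype.
+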
tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "dtype", + tm.ALL_NUMERIC_DTYPES + + tm.DATETIME64_DTYPES + + tm.TIMEDELTA64_DTYPES + + tm.BOOL_DTYPES, + ) + def test_check_dtype_empty_numeric_column(self, dtype): + # GH24386: Ensure dtypes are set correctly for an empty DataFrame. + # Empty DataFrame is generated via dictionary data with non-overlapping columns. + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + + assert data.b.dtype == dtype + + @pytest.mark.parametrize( + "dtype", tm.STRING_DTYPES + tm.BYTES_DTYPES + tm.OBJECT_DTYPES + ) + def test_check_dtype_empty_string_column(self, request, dtype, using_array_manager): + # GH24386: Ensure dtypes are set correctly for an empty DataFrame. + # Empty DataFrame is generated via dictionary data with non-overlapping columns. + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + + if using_array_manager and dtype in tm.BYTES_DTYPES: + # TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype + td.mark_array_manager_not_yet_implemented(request) + + assert data.b.dtype.name == "object" + + def test_to_frame_with_falsey_names(self): + # GH 16114 + result = Series(name=0, dtype=object).to_frame().dtypes + expected = Series({0: object}) + tm.assert_series_equal(result, expected) + + result = DataFrame(Series(name=0, dtype=object)).dtypes + tm.assert_series_equal(result, expected) + + @pytest.mark.arm_slow + @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) + def test_constructor_range_dtype(self, dtype): + expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64") + + # GH 26342 + result = DataFrame(range(5), columns=["A"], dtype=dtype) + tm.assert_frame_equal(result, expected) + + # GH 16804 + result = DataFrame({"A": range(5)}, dtype=dtype) + tm.assert_frame_equal(result, expected) + + def test_frame_from_list_subclass(self): + # GH21226 + class List(list): + pass + + expected = DataFrame([[1, 2, 3], [4, 5, 6]]) + result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "extension_arr", + [ + Categorical(list("aabbc")), + SparseArray([1, np.nan, np.nan, np.nan]), + IntervalArray([Interval(0, 1), Interval(1, 5)]), + PeriodArray(pd.period_range(start="1/1/2017", end="1/1/2018", freq="M")), + ], + ) + def test_constructor_with_extension_array(self, extension_arr): + # GH11363 + expected = DataFrame(Series(extension_arr)) + result = DataFrame(extension_arr) + tm.assert_frame_equal(result, expected) + + def test_datetime_date_tuple_columns_from_dict(self): + # GH 10863 + v = date.today() + tup = v, v + result = DataFrame({tup: Series(range(3), index=range(3))}, columns=[tup]) + expected = DataFrame([0, 1, 2], columns=Index(Series([tup]))) + tm.assert_frame_equal(result, expected) + + def test_construct_with_two_categoricalindex_series(self): + # GH 14600 + s1 = Series([39, 6, 4], index=CategoricalIndex(["female", "male", "unknown"])) + s2 = Series( + [2, 152, 2, 242, 150], + index=CategoricalIndex(["f", "female", "m", "male", "unknown"]), + ) + result = DataFrame([s1, s2]) + expected = DataFrame( + np.array([[39, 6, 4, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]]), + columns=["female", "male", "unknown", "f", "m"], + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_series_nonexact_categoricalindex(self): + # GH 42424 + ser = Series(range(100)) + ser1 = cut(ser, 10).value_counts().head(5) + ser2 = cut(ser, 10).value_counts().tail(5) + result = DataFrame({"1": ser1, 
"2": ser2}) + index = CategoricalIndex( + [ + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), + ], + ordered=True, + ) + expected = DataFrame( + {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index + ) + tm.assert_frame_equal(expected, result) + + def test_from_M8_structured(self): + dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))] + arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")]) + df = DataFrame(arr) + + assert df["Date"][0] == dates[0][0] + assert df["Forecasting"][0] == dates[0][1] + + s = Series(arr["Date"]) + assert isinstance(s[0], Timestamp) + assert s[0] == dates[0][0] + + def test_from_datetime_subclass(self): + # GH21142 Verify whether Datetime subclasses are also of dtype datetime + class DatetimeSubclass(datetime): + pass + + data = DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]}) + assert data.datetime.dtype == "datetime64[ns]" + + def test_with_mismatched_index_length_raises(self): + # GH#33437 + dti = date_range("2016-01-01", periods=3, tz="US/Pacific") + msg = "Shape of passed values|Passed arrays should have the same length" + with pytest.raises(ValueError, match=msg): + DataFrame(dti, index=range(4)) + + def test_frame_ctor_datetime64_column(self): + rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") + dates = np.asarray(rng) + + df = DataFrame( + {"A": np.random.default_rng(2).standard_normal(len(rng)), "B": dates} + ) + assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]")) + + def test_dataframe_constructor_infer_multiindex(self): + index_lists = [["a", "a", "b", "b"], ["x", "y", "x", "y"]] + + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=[np.array(x) for x in index_lists], + ) + assert isinstance(multi.index, MultiIndex) + assert not isinstance(multi.columns, MultiIndex) + + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=index_lists + ) + assert isinstance(multi.columns, MultiIndex) + + @pytest.mark.parametrize( + "input_vals", + [ + ([1, 2]), + (["1", "2"]), + (list(date_range("1/1/2011", periods=2, freq="h"))), + (list(date_range("1/1/2011", periods=2, freq="h", tz="US/Eastern"))), + ([Interval(left=0, right=5)]), + ], + ) + def test_constructor_list_str(self, input_vals, string_dtype): + # GH#16605 + # Ensure that data elements are converted to strings when + # dtype is str, 'str', or 'U' + + result = DataFrame({"A": input_vals}, dtype=string_dtype) + expected = DataFrame({"A": input_vals}).astype({"A": string_dtype}) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_str_na(self, string_dtype): + result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype) + expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("copy", [False, True]) + def test_dict_nocopy( + self, + request, + copy, + any_numeric_ea_dtype, + any_numpy_dtype, + using_array_manager, + using_copy_on_write, + ): + if ( + using_array_manager + and not copy + and any_numpy_dtype not in tm.STRING_DTYPES + tm.BYTES_DTYPES + ): + # TODO(ArrayManager) properly honor copy keyword for dict input + 
td.mark_array_manager_not_yet_implemented(request) + + a = np.array([1, 2], dtype=any_numpy_dtype) + b = np.array([3, 4], dtype=any_numpy_dtype) + if b.dtype.kind in ["S", "U"]: + # These get cast, making the checks below more cumbersome + pytest.skip(f"{b.dtype} get cast, making the checks below more cumbersome") + + c = pd.array([1, 2], dtype=any_numeric_ea_dtype) + c_orig = c.copy() + df = DataFrame({"a": a, "b": b, "c": c}, copy=copy) + + def get_base(obj): + if isinstance(obj, np.ndarray): + return obj.base + elif isinstance(obj.dtype, np.dtype): + # i.e. DatetimeArray, TimedeltaArray + return obj._ndarray.base + else: + raise TypeError + + def check_views(c_only: bool = False): + # written to work for either BlockManager or ArrayManager + + # Check that the underlying data behind df["c"] is still `c` + # after setting with iloc. Since we don't know which entry in + # df._mgr.arrays corresponds to df["c"], we just check that exactly + # one of these arrays is `c`. GH#38939 + assert sum(x is c for x in df._mgr.arrays) == 1 + if c_only: + # If we ever stop consolidating in setitem_with_indexer, + # this will become unnecessary. + return + + assert ( + sum( + get_base(x) is a + for x in df._mgr.arrays + if isinstance(x.dtype, np.dtype) + ) + == 1 + ) + assert ( + sum( + get_base(x) is b + for x in df._mgr.arrays + if isinstance(x.dtype, np.dtype) + ) + == 1 + ) + + if not copy: + # constructor preserves views + check_views() + + # TODO: most of the rest of this test belongs in indexing tests + if lib.is_np_dtype(df.dtypes.iloc[0], "fciuO"): + warn = None + else: + warn = FutureWarning + with tm.assert_produces_warning(warn, match="incompatible dtype"): + df.iloc[0, 0] = 0 + df.iloc[0, 1] = 0 + if not copy: + check_views(True) + + # FIXME(GH#35417): until GH#35417, iloc.setitem into EA values does not preserve + # view, so we have to check in the other direction + df.iloc[:, 2] = pd.array([45, 46], dtype=c.dtype) + assert df.dtypes.iloc[2] == c.dtype + if not copy and not using_copy_on_write: + check_views(True) + + if copy: + if a.dtype.kind == "M": + assert a[0] == a.dtype.type(1, "ns") + assert b[0] == b.dtype.type(3, "ns") + else: + assert a[0] == a.dtype.type(1) + assert b[0] == b.dtype.type(3) + # FIXME(GH#35417): enable after GH#35417 + assert c[0] == c_orig[0] # i.e. df.iloc[0, 2]=45 did *not* update c + elif not using_copy_on_write: + # TODO: we can call check_views if we stop consolidating + # in setitem_with_indexer + assert c[0] == 45 # i.e. df.iloc[0, 2]=45 *did* update c + # TODO: we can check b[0] == 0 if we stop consolidating in + # setitem_with_indexer (except for datetimelike?) 
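+
+ # Added sketch (not part of the pandas suite): a minimal standalone
+ # illustration of the dict copy semantics exercised above -- with
+ # copy=False a numpy column can share memory with its source array,
+ # while copy=True always detaches it.
+ def test_dict_copy_semantics_sketch(self):
+ arr = np.array([1.0, 2.0, 3.0])
+ shared = DataFrame({"a": arr}, copy=False)
+ copied = DataFrame({"a": arr}, copy=True)
+ assert np.shares_memory(shared["a"].to_numpy(), arr)
+ assert not np.shares_memory(copied["a"].to_numpy(), arr)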
+ + def test_construct_from_dict_ea_series(self): + # GH#53744 - default of copy=True should also apply for Series with + # extension dtype + ser = Series([1, 2, 3], dtype="Int64") + df = DataFrame({"a": ser}) + assert not np.shares_memory(ser.values._data, df["a"].values._data) + + def test_from_series_with_name_with_columns(self): + # GH 7893 + result = DataFrame(Series(1, name="foo"), columns=["bar"]) + expected = DataFrame(columns=["bar"]) + tm.assert_frame_equal(result, expected) + + def test_nested_list_columns(self): + # GH 14467 + result = DataFrame( + [[1, 2, 3], [4, 5, 6]], columns=[["A", "A", "A"], ["a", "b", "c"]] + ) + expected = DataFrame( + [[1, 2, 3], [4, 5, 6]], + columns=MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]), + ) + tm.assert_frame_equal(result, expected) + + def test_from_2d_object_array_of_periods_or_intervals(self): + # Period analogue to GH#26825 + pi = pd.period_range("2016-04-05", periods=3) + data = pi._data.astype(object).reshape(1, -1) + df = DataFrame(data) + assert df.shape == (1, 3) + assert (df.dtypes == pi.dtype).all() + assert (df == pi).all().all() + + ii = pd.IntervalIndex.from_breaks([3, 4, 5, 6]) + data2 = ii._data.astype(object).reshape(1, -1) + df2 = DataFrame(data2) + assert df2.shape == (1, 3) + assert (df2.dtypes == ii.dtype).all() + assert (df2 == ii).all().all() + + # mixed + data3 = np.r_[data, data2, data, data2].T + df3 = DataFrame(data3) + expected = DataFrame({0: pi, 1: ii, 2: pi, 3: ii}) + tm.assert_frame_equal(df3, expected) + + @pytest.mark.parametrize( + "col_a, col_b", + [ + ([[1], [2]], np.array([[1], [2]])), + (np.array([[1], [2]]), [[1], [2]]), + (np.array([[1], [2]]), np.array([[1], [2]])), + ], + ) + def test_error_from_2darray(self, col_a, col_b): + msg = "Per-column arrays must each be 1-dimensional" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": col_a, "b": col_b}) + + def test_from_dict_with_missing_copy_false(self): + # GH#45369 filled columns should not be views of one another + df = DataFrame(index=[1, 2, 3], columns=["a", "b", "c"], copy=False) + assert not np.shares_memory(df["a"]._values, df["b"]._values) + + df.iloc[0, 0] = 0 + expected = DataFrame( + { + "a": [0, np.nan, np.nan], + "b": [np.nan, np.nan, np.nan], + "c": [np.nan, np.nan, np.nan], + }, + index=[1, 2, 3], + dtype=object, + ) + tm.assert_frame_equal(df, expected) + + def test_construction_empty_array_multi_column_raises(self): + # GH#46822 + msg = r"Shape of passed values is \(0, 1\), indices imply \(0, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(data=np.array([]), columns=["a", "b"]) + + def test_construct_with_strings_and_none(self): + # GH#32218 + df = DataFrame(["1", "2", None], columns=["a"], dtype="str") + expected = DataFrame({"a": ["1", "2", None]}, dtype="str") + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference(self): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"]}, + dtype=dtype, + columns=Index(["a"], dtype=dtype), + index=Index(["x", "y"], dtype=dtype), + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}, index=["x", "y"]) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", 1]}, dtype="object", 
columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", 1]}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"]}, dtype="object", columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}, dtype="object") + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference_array_string_dtype(self): + # GH#54496 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": np.array(["a", "b"])}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame({0: ["a", "b"], 1: ["c", "d"]}, dtype=dtype) + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["a", "c"], ["b", "d"]])) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"], "b": ["c", "d"]}, + dtype=dtype, + columns=Index(["a", "b"], dtype=dtype), + ) + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["a", "c"], ["b", "d"]]), columns=["a", "b"]) + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference_block_dim(self): + # GH#55363 + pytest.importorskip("pyarrow") + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) + assert df._mgr.blocks[0].ndim == 2 + + def test_inference_on_pandas_objects(self): + # GH#56012 + idx = Index([Timestamp("2019-12-31")], dtype=object) + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + result = DataFrame(idx, columns=["a"]) + assert result.dtypes.iloc[0] != np.object_ + result = DataFrame({"a": idx}) + assert result.dtypes.iloc[0] == np.object_ + + ser = Series([Timestamp("2019-12-31")], dtype=object) + + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + result = DataFrame(ser, columns=["a"]) + assert result.dtypes.iloc[0] != np.object_ + result = DataFrame({"a": ser}) + assert result.dtypes.iloc[0] == np.object_ + + +class TestDataFrameConstructorIndexInference: + def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self): + rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M") + s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1) + + rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M") + s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2) + df = DataFrame({"s1": s1, "s2": s2}) + + exp = pd.period_range("1/1/1980", "1/1/2012", freq="M") + tm.assert_index_equal(df.index, exp) + + def test_frame_from_dict_with_mixed_tzaware_indexes(self): + # GH#44091 + dti = date_range("2016-01-01", periods=3) + + ser1 = Series(range(3), index=dti) + ser2 = Series(range(3), index=dti.tz_localize("UTC")) + ser3 = Series(range(3), index=dti.tz_localize("US/Central")) + ser4 = Series(range(3)) + + # no tz-naive, but we do have mixed tzs and a non-DTI + df1 = DataFrame({"A": ser2, "B": ser3, "C": ser4}) + exp_index = Index( + list(ser2.index) + list(ser3.index) + list(ser4.index), dtype=object + ) + tm.assert_index_equal(df1.index, exp_index) + + df2 = DataFrame({"A": ser2, "C": ser4, "B": ser3}) + exp_index3 = Index( + list(ser2.index) + list(ser4.index) + list(ser3.index), dtype=object + ) + tm.assert_index_equal(df2.index, exp_index3) + + df3 = DataFrame({"B": ser3, "A": ser2, "C": 
ser4}) + exp_index3 = Index( + list(ser3.index) + list(ser2.index) + list(ser4.index), dtype=object + ) + tm.assert_index_equal(df3.index, exp_index3) + + df4 = DataFrame({"C": ser4, "B": ser3, "A": ser2}) + exp_index4 = Index( + list(ser4.index) + list(ser3.index) + list(ser2.index), dtype=object + ) + tm.assert_index_equal(df4.index, exp_index4) + + # TODO: not clear if these raising is desired (no extant tests), + # but this is de facto behavior 2021-12-22 + msg = "Cannot join tz-naive with tz-aware DatetimeIndex" + with pytest.raises(TypeError, match=msg): + DataFrame({"A": ser2, "B": ser3, "C": ser4, "D": ser1}) + with pytest.raises(TypeError, match=msg): + DataFrame({"A": ser2, "B": ser3, "D": ser1}) + with pytest.raises(TypeError, match=msg): + DataFrame({"D": ser1, "A": ser2, "B": ser3}) + + @pytest.mark.parametrize( + "key_val, col_vals, col_type", + [ + ["3", ["3", "4"], "utf8"], + [3, [3, 4], "int8"], + ], + ) + def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type): + # GH 53617 + pa = pytest.importorskip("pyarrow") + cols = pd.arrays.ArrowExtensionArray( + pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)())) + ) + result = DataFrame({key_val: [1, 2]}, columns=cols) + expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols) + expected.isetitem(1, expected.iloc[:, 1].astype(object)) + tm.assert_frame_equal(result, expected) + + +class TestDataFrameConstructorWithDtypeCoercion: + def test_floating_values_integer_dtype(self): + # GH#40110 make DataFrame behavior with arraylike floating data and + # inty dtype match Series behavior + + arr = np.random.default_rng(2).standard_normal((10, 5)) + + # GH#49599 in 2.0 we raise instead of either + # a) silently ignoring dtype and returningfloat (the old Series behavior) or + # b) rounding (the old DataFrame behavior) + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + DataFrame(arr, dtype="i8") + + df = DataFrame(arr.round(), dtype="i8") + assert (df.dtypes == "i8").all() + + # with NaNs, we go through a different path with a different warning + arr[0, 0] = np.nan + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(arr, dtype="i8") + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr[0], dtype="i8") + # The future (raising) behavior matches what we would get via astype: + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(arr).astype("i8") + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr[0]).astype("i8") + + +class TestDataFrameConstructorWithDatetimeTZ: + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_construction_preserves_tzaware_dtypes(self, tz): + # after GH#7822 + # these retain the timezones on dict construction + dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI") + dr_tz = dr.tz_localize(tz) + df = DataFrame({"A": "foo", "B": dr_tz}, index=dr) + tz_expected = DatetimeTZDtype("ns", dr_tz.tzinfo) + assert df["B"].dtype == tz_expected + + # GH#2810 (with timezones) + datetimes_naive = [ts.to_pydatetime() for ts in dr] + datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz] + df = DataFrame({"dr": dr}) + df["dr_tz"] = dr_tz + df["datetimes_naive"] = datetimes_naive + df["datetimes_with_tz"] = datetimes_with_tz + result = df.dtypes + expected = Series( + [ + np.dtype("datetime64[ns]"), + DatetimeTZDtype(tz=tz), 
+ np.dtype("datetime64[ns]"), + DatetimeTZDtype(tz=tz), + ], + index=["dr", "dr_tz", "datetimes_naive", "datetimes_with_tz"], + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("pydt", [True, False]) + def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt): + # GH#25843, GH#41555, GH#33401 + tz = tz_aware_fixture + ts = Timestamp("2019", tz=tz) + if pydt: + ts = ts.to_pydatetime() + + msg = ( + "Cannot convert timezone-aware data to timezone-naive dtype. " + r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." + ) + with pytest.raises(ValueError, match=msg): + DataFrame({0: [ts]}, dtype="datetime64[ns]") + + msg2 = "Cannot unbox tzaware Timestamp to tznaive dtype" + with pytest.raises(TypeError, match=msg2): + DataFrame({0: ts}, index=[0], dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + DataFrame([ts], dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + DataFrame(np.array([ts], dtype=object), dtype="datetime64[ns]") + + with pytest.raises(TypeError, match=msg2): + DataFrame(ts, index=[0], columns=[0], dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + DataFrame([Series([ts])], dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + DataFrame([[ts]], columns=[0], dtype="datetime64[ns]") + + def test_from_dict(self): + # 8260 + # support datetime64 with tz + + idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") + dr = date_range("20130110", periods=3) + + # construction + df = DataFrame({"A": idx, "B": dr}) + assert df["A"].dtype, "M8[ns, US/Eastern" + assert df["A"].name == "A" + tm.assert_series_equal(df["A"], Series(idx, name="A")) + tm.assert_series_equal(df["B"], Series(dr, name="B")) + + def test_from_index(self): + # from index + idx2 = date_range("20130101", periods=3, tz="US/Eastern", name="foo") + df2 = DataFrame(idx2) + tm.assert_series_equal(df2["foo"], Series(idx2, name="foo")) + df2 = DataFrame(Series(idx2)) + tm.assert_series_equal(df2["foo"], Series(idx2, name="foo")) + + idx2 = date_range("20130101", periods=3, tz="US/Eastern") + df2 = DataFrame(idx2) + tm.assert_series_equal(df2[0], Series(idx2, name=0)) + df2 = DataFrame(Series(idx2)) + tm.assert_series_equal(df2[0], Series(idx2, name=0)) + + def test_frame_dict_constructor_datetime64_1680(self): + dr = date_range("1/1/2012", periods=10) + s = Series(dr, index=dr) + + # it works! + DataFrame({"a": "foo", "b": s}, index=dr) + DataFrame({"a": "foo", "b": s.values}, index=dr) + + def test_frame_datetime64_mixed_index_ctor_1681(self): + dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI") + ts = Series(dr) + + # it works! 
+ d = DataFrame({"A": "foo", "B": ts}, index=dr) + assert d["B"].isna().all() + + def test_frame_timeseries_column(self): + # GH19157 + dr = date_range( + start="20130101T10:00:00", periods=3, freq="min", tz="US/Eastern" + ) + result = DataFrame(dr, columns=["timestamps"]) + expected = DataFrame( + { + "timestamps": [ + Timestamp("20130101T10:00:00", tz="US/Eastern"), + Timestamp("20130101T10:01:00", tz="US/Eastern"), + Timestamp("20130101T10:02:00", tz="US/Eastern"), + ] + } + ) + tm.assert_frame_equal(result, expected) + + def test_nested_dict_construction(self): + # GH22227 + columns = ["Nevada", "Ohio"] + pop = { + "Nevada": {2001: 2.4, 2002: 2.9}, + "Ohio": {2000: 1.5, 2001: 1.7, 2002: 3.6}, + } + result = DataFrame(pop, index=[2001, 2002, 2003], columns=columns) + expected = DataFrame( + [(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)], + columns=columns, + index=Index([2001, 2002, 2003]), + ) + tm.assert_frame_equal(result, expected) + + def test_from_tzaware_object_array(self): + # GH#26825 2D object array of tzaware timestamps should not raise + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + data = dti._data.astype(object).reshape(1, -1) + df = DataFrame(data) + assert df.shape == (1, 3) + assert (df.dtypes == dti.dtype).all() + assert (df == dti).all().all() + + def test_from_tzaware_mixed_object_array(self): + # GH#26825 + arr = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + pd.NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + pd.NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ], + dtype=object, + ).T + res = DataFrame(arr, columns=["A", "B", "C"]) + + expected_dtypes = [ + "datetime64[ns]", + "datetime64[ns, US/Eastern]", + "datetime64[ns, CET]", + ] + assert (res.dtypes == expected_dtypes).all() + + def test_from_2d_ndarray_with_dtype(self): + # GH#12513 + array_dim2 = np.arange(10).reshape((5, 2)) + df = DataFrame(array_dim2, dtype="datetime64[ns, UTC]") + + expected = DataFrame(array_dim2).astype("datetime64[ns, UTC]") + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("typ", [set, frozenset]) + def test_construction_from_set_raises(self, typ): + # https://github.com/pandas-dev/pandas/issues/32582 + values = typ({1, 2, 3}) + msg = f"'{typ.__name__}' type is unordered" + with pytest.raises(TypeError, match=msg): + DataFrame({"a": values}) + + with pytest.raises(TypeError, match=msg): + Series(values) + + def test_construction_from_ndarray_datetimelike(self): + # ensure the underlying arrays are properly wrapped as EA when + # constructed from 2D ndarray + arr = np.arange(0, 12, dtype="datetime64[ns]").reshape(4, 3) + df = DataFrame(arr) + assert all(isinstance(arr, DatetimeArray) for arr in df._mgr.arrays) + + def test_construction_from_ndarray_with_eadtype_mismatched_columns(self): + arr = np.random.default_rng(2).standard_normal((10, 2)) + dtype = pd.array([2.0]).dtype + msg = r"len\(arrays\) must match len\(columns\)" + with pytest.raises(ValueError, match=msg): + DataFrame(arr, columns=["foo"], dtype=dtype) + + arr2 = pd.array([2.0, 3.0, 4.0]) + with pytest.raises(ValueError, match=msg): + DataFrame(arr2, columns=["foo", "bar"]) + + def test_columns_indexes_raise_on_sets(self): + # GH 47215 + data = [[1, 2, 3], [4, 5, 6]] + with pytest.raises(ValueError, match="index cannot be a set"): + DataFrame(data, index={"a", 
"b"}) + with pytest.raises(ValueError, match="columns cannot be a set"): + DataFrame(data, columns={"a", "b", "c"}) + + # TODO: make this not cast to object in pandas 3.0 + @pytest.mark.skipif( + not np_version_gt2, reason="StringDType only available in numpy 2 and above" + ) + @pytest.mark.parametrize( + "data", + [ + {"a": ["a", "b", "c"], "b": [1.0, 2.0, 3.0], "c": ["d", "e", "f"]}, + ], + ) + def test_np_string_array_object_cast(self, data): + from numpy.dtypes import StringDType + + data["a"] = np.array(data["a"], dtype=StringDType()) + res = DataFrame(data) + assert res["a"].dtype == np.object_ + assert (res["a"] == data["a"]).all() + + +def get1(obj): # TODO: make a helper in tm? + if isinstance(obj, Series): + return obj.iloc[0] + else: + return obj.iloc[0, 0] + + +class TestFromScalar: + @pytest.fixture(params=[list, dict, None]) + def box(self, request): + return request.param + + @pytest.fixture + def constructor(self, frame_or_series, box): + extra = {"index": range(2)} + if frame_or_series is DataFrame: + extra["columns"] = ["A"] + + if box is None: + return functools.partial(frame_or_series, **extra) + + elif box is dict: + if frame_or_series is Series: + return lambda x, **kwargs: frame_or_series( + {0: x, 1: x}, **extra, **kwargs + ) + else: + return lambda x, **kwargs: frame_or_series({"A": x}, **extra, **kwargs) + elif frame_or_series is Series: + return lambda x, **kwargs: frame_or_series([x, x], **extra, **kwargs) + else: + return lambda x, **kwargs: frame_or_series({"A": [x, x]}, **extra, **kwargs) + + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_from_nat_scalar(self, dtype, constructor): + obj = constructor(pd.NaT, dtype=dtype) + assert np.all(obj.dtypes == dtype) + assert np.all(obj.isna()) + + def test_from_timedelta_scalar_preserves_nanos(self, constructor): + td = Timedelta(1) + + obj = constructor(td, dtype="m8[ns]") + assert get1(obj) == td + + def test_from_timestamp_scalar_preserves_nanos(self, constructor, fixed_now_ts): + ts = fixed_now_ts + Timedelta(1) + + obj = constructor(ts, dtype="M8[ns]") + assert get1(obj) == ts + + def test_from_timedelta64_scalar_object(self, constructor): + td = Timedelta(1) + td64 = td.to_timedelta64() + + obj = constructor(td64, dtype=object) + assert isinstance(get1(obj), np.timedelta64) + + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_from_scalar_datetimelike_mismatched(self, constructor, cls): + scalar = cls("NaT", "ns") + dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls] + + if cls is np.datetime64: + msg1 = "Invalid type for timedelta scalar: " + else: + msg1 = " is not convertible to datetime" + msg = "|".join(["Cannot cast", msg1]) + + with pytest.raises(TypeError, match=msg): + constructor(scalar, dtype=dtype) + + scalar = cls(4, "ns") + with pytest.raises(TypeError, match=msg): + constructor(scalar, dtype=dtype) + + @pytest.mark.parametrize("cls", [datetime, np.datetime64]) + def test_from_out_of_bounds_ns_datetime( + self, constructor, cls, request, box, frame_or_series + ): + # scalar that won't fit in nanosecond dt64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="Timestamp constructor has been updated to cast dt64 to " + "non-nano, but DatetimeArray._from_sequence has not", + strict=True, + ) + request.applymarker(mark) + + scalar = datetime(9999, 1, 1) + exp_dtype = "M8[us]" # pydatetime objects default to this reso + + if cls is np.datetime64: + scalar = 
np.datetime64(scalar, "D") + exp_dtype = "M8[s]" # closest reso to input + result = constructor(scalar) + + item = get1(result) + dtype = tm.get_dtype(result) + + assert type(item) is Timestamp + assert item.asm8.dtype == exp_dtype + assert dtype == exp_dtype + + @pytest.mark.skip_ubsan + def test_out_of_s_bounds_datetime64(self, constructor): + scalar = np.datetime64(np.iinfo(np.int64).max, "D") + result = constructor(scalar) + item = get1(result) + assert type(item) is np.datetime64 + dtype = tm.get_dtype(result) + assert dtype == object + + @pytest.mark.parametrize("cls", [timedelta, np.timedelta64]) + def test_from_out_of_bounds_ns_timedelta( + self, constructor, cls, request, box, frame_or_series + ): + # scalar that won't fit in nanosecond td64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="TimedeltaArray constructor has been updated to cast td64 " + "to non-nano, but TimedeltaArray._from_sequence has not", + strict=True, + ) + request.applymarker(mark) + + scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1) + exp_dtype = "m8[us]" # smallest reso that fits + if cls is np.timedelta64: + scalar = np.timedelta64(scalar, "D") + exp_dtype = "m8[s]" # closest reso to input + result = constructor(scalar) + + item = get1(result) + dtype = tm.get_dtype(result) + + assert type(item) is Timedelta + assert item.asm8.dtype == exp_dtype + assert dtype == exp_dtype + + @pytest.mark.skip_ubsan + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_out_of_s_bounds_timedelta64(self, constructor, cls): + scalar = cls(np.iinfo(np.int64).max, "D") + result = constructor(scalar) + item = get1(result) + assert type(item) is cls + dtype = tm.get_dtype(result) + assert dtype == object + + def test_tzaware_data_tznaive_dtype(self, constructor, box, frame_or_series): + tz = "US/Eastern" + ts = Timestamp("2019", tz=tz) + + if box is None or (frame_or_series is DataFrame and box is dict): + msg = "Cannot unbox tzaware Timestamp to tznaive dtype" + err = TypeError + else: + msg = ( + "Cannot convert timezone-aware data to timezone-naive dtype. " + r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." + ) + err = ValueError + + with pytest.raises(err, match=msg): + constructor(ts, dtype="M8[ns]") + + +# TODO: better location for this test? 
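+# Added illustration (not in pandas): non-nano resolutions are preserved
+# when the input already carries a supported unit, e.g.
+#
+#     arr = np.array(["2020-01-01"], dtype="datetime64[s]")
+#     DataFrame(arr).dtypes[0]   # datetime64[s], not upcast to [ns]
+#
+# The class below pins down that behavior for DTA/TDA inputs.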
+class TestAllowNonNano: + # Until 2.0, we do not preserve non-nano dt64/td64 when passed as ndarray, + # but do preserve it when passed as DTA/TDA + + @pytest.fixture(params=[True, False]) + def as_td(self, request): + return request.param + + @pytest.fixture + def arr(self, as_td): + values = np.arange(5).astype(np.int64).view("M8[s]") + if as_td: + values = values - values[0] + return TimedeltaArray._simple_new(values, dtype=values.dtype) + else: + return DatetimeArray._simple_new(values, dtype=values.dtype) + + def test_index_allow_non_nano(self, arr): + idx = Index(arr) + assert idx.dtype == arr.dtype + + def test_dti_tdi_allow_non_nano(self, arr, as_td): + if as_td: + idx = pd.TimedeltaIndex(arr) + else: + idx = DatetimeIndex(arr) + assert idx.dtype == arr.dtype + + def test_series_allow_non_nano(self, arr): + ser = Series(arr) + assert ser.dtype == arr.dtype + + def test_frame_allow_non_nano(self, arr): + df = DataFrame(arr) + assert df.dtypes[0] == arr.dtype + + def test_frame_from_dict_allow_non_nano(self, arr): + df = DataFrame({0: arr}) + assert df.dtypes[0] == arr.dtype diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_cumulative.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_cumulative.py new file mode 100644 index 0000000000000000000000000000000000000000..5bd9c426123159fcfcf6bf5289fd08a60dfd91b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_cumulative.py @@ -0,0 +1,81 @@ +""" +Tests for DataFrame cumulative operations + +See also +-------- +tests.series.test_cumulative +""" + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameCumulativeOps: + # --------------------------------------------------------------------- + # Cumulative Operations - cumsum, cummax, ... + + def test_cumulative_ops_smoke(self): + # it works + df = DataFrame({"A": np.arange(20)}, index=np.arange(20)) + df.cummax() + df.cummin() + df.cumsum() + + dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5)) + # TODO(wesm): do something with this? + dm.cumsum() + + def test_cumprod_smoke(self, datetime_frame): + datetime_frame.iloc[5:10, 0] = np.nan + datetime_frame.iloc[10:15, 1] = np.nan + datetime_frame.iloc[15:, 2] = np.nan + + # ints + df = datetime_frame.fillna(0).astype(int) + df.cumprod(0) + df.cumprod(1) + + # ints32 + df = datetime_frame.fillna(0).astype(np.int32) + df.cumprod(0) + df.cumprod(1) + + @pytest.mark.parametrize("method", ["cumsum", "cumprod", "cummin", "cummax"]) + def test_cumulative_ops_match_series_apply(self, datetime_frame, method): + datetime_frame.iloc[5:10, 0] = np.nan + datetime_frame.iloc[10:15, 1] = np.nan + datetime_frame.iloc[15:, 2] = np.nan + + # axis = 0 + result = getattr(datetime_frame, method)() + expected = datetime_frame.apply(getattr(Series, method)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = getattr(datetime_frame, method)(axis=1) + expected = datetime_frame.apply(getattr(Series, method), axis=1) + tm.assert_frame_equal(result, expected) + + # fix issue TODO: GH ref? 
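+ # Note (added): unlike reductions, cumulative ops keep the original
+ # shape; the assertion below pins that down for both axes.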
assert np.shape(result) == np.shape(datetime_frame)
+
+    def test_cumsum_preserve_dtypes(self):
+        # GH#19296 don't incorrectly upcast to object
+        df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})
+
+        result = df.cumsum()
+
+        expected = DataFrame(
+            {
+                "A": Series([1, 3, 6], dtype=np.int64),
+                "B": Series([1, 3, 6], dtype=np.float64),
+                "C": df["C"].cumsum(),
+            }
+        )
+        tm.assert_frame_equal(result, expected)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_iteration.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_iteration.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1c23ff05f3e19aca490444216ec295453483e80
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_iteration.py
@@ -0,0 +1,160 @@
+import datetime
+
+import numpy as np
+import pytest
+
+from pandas.compat import (
+    IS64,
+    is_platform_windows,
+)
+
+from pandas import (
+    Categorical,
+    DataFrame,
+    Series,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestIteration:
+    def test_keys(self, float_frame):
+        assert float_frame.keys() is float_frame.columns
+
+    def test_iteritems(self):
+        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
+        for k, v in df.items():
+            assert isinstance(v, DataFrame._constructor_sliced)
+
+    def test_items(self):
+        # GH#17213, GH#13918
+        cols = ["a", "b", "c"]
+        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
+        for c, (k, v) in zip(cols, df.items()):
+            assert c == k
+            assert isinstance(v, Series)
+            assert (df[k] == v).all()
+
+    def test_items_names(self, float_string_frame):
+        for k, v in float_string_frame.items():
+            assert v.name == k
+
+    def test_iter(self, float_frame):
+        assert list(float_frame) == list(float_frame.columns)
+
+    def test_iterrows(self, float_frame, float_string_frame):
+        for k, v in float_frame.iterrows():
+            exp = float_frame.loc[k]
+            tm.assert_series_equal(v, exp)
+
+        for k, v in float_string_frame.iterrows():
+            exp = float_string_frame.loc[k]
+            tm.assert_series_equal(v, exp)
+
+    def test_iterrows_iso8601(self):
+        # GH#19671
+        s = DataFrame(
+            {
+                "non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
+                "iso8601": date_range("2000-01-01", periods=4, freq="ME"),
+            }
+        )
+        for k, v in s.iterrows():
+            exp = s.loc[k]
+            tm.assert_series_equal(v, exp)
+
+    def test_iterrows_corner(self):
+        # GH#12222
+        df = DataFrame(
+            {
+                "a": [datetime.datetime(2015, 1, 1)],
+                "b": [None],
+                "c": [None],
+                "d": [""],
+                "e": [[]],
+                "f": [set()],
+                "g": [{}],
+            }
+        )
+        expected = Series(
+            [datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
+            index=list("abcdefg"),
+            name=0,
+            dtype="object",
+        )
+        _, result = next(df.iterrows())
+        tm.assert_series_equal(result, expected)
+
+    def test_itertuples(self, float_frame):
+        for i, tup in enumerate(float_frame.itertuples()):
+            ser = DataFrame._constructor_sliced(tup[1:])
+            ser.name = tup[0]
+            expected = float_frame.iloc[i, :].reset_index(drop=True)
+            tm.assert_series_equal(ser, expected)
+
+    def test_itertuples_index_false(self):
+        df = DataFrame(
+            {"floats": np.random.default_rng(2).standard_normal(5), "ints": range(5)},
+            columns=["floats", "ints"],
+        )
+
+        for tup in df.itertuples(index=False):
+            assert isinstance(tup[1], int)
+
+    def test_itertuples_duplicate_cols(self):
+        df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
+        dfaa = df[["a", "a"]]
+
+        assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
+
+        # repr with int on 32-bit/windows
+        if not (is_platform_windows() or not IS64):
+
assert ( + repr(list(df.itertuples(name=None))) + == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]" + ) + + def test_itertuples_tuple_name(self): + df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]}) + tup = next(df.itertuples(name="TestName")) + assert tup._fields == ("Index", "a", "b") + assert (tup.Index, tup.a, tup.b) == tup + assert type(tup).__name__ == "TestName" + + def test_itertuples_disallowed_col_labels(self): + df = DataFrame(data={"def": [1, 2, 3], "return": [4, 5, 6]}) + tup2 = next(df.itertuples(name="TestName")) + assert tup2 == (0, 1, 4) + assert tup2._fields == ("Index", "_1", "_2") + + @pytest.mark.parametrize("limit", [254, 255, 1024]) + @pytest.mark.parametrize("index", [True, False]) + def test_itertuples_py2_3_field_limit_namedtuple(self, limit, index): + # GH#28282 + df = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(limit)}]) + result = next(df.itertuples(index=index)) + assert isinstance(result, tuple) + assert hasattr(result, "_fields") + + def test_sequence_like_with_categorical(self): + # GH#7839 + # make sure can iterate + df = DataFrame( + {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]} + ) + df["grade"] = Categorical(df["raw_grade"]) + + # basic sequencing testing + result = list(df.grade.values) + expected = np.array(df.grade.values).tolist() + tm.assert_almost_equal(result, expected) + + # iteration + for t in df.itertuples(index=False): + str(t) + + for row, s in df.iterrows(): + str(s) + + for c, col in df.items(): + str(col) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_logical_ops.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_logical_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..16ca3a202f1e0242104d9516b4dd2318599cefef --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_logical_ops.py @@ -0,0 +1,218 @@ +import operator +import re + +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DataFrame, + Interval, + Series, + isnull, +) +import pandas._testing as tm + + +class TestDataFrameLogicalOperators: + # &, |, ^ + + @pytest.mark.parametrize( + "left, right, op, expected", + [ + ( + [True, False, np.nan], + [True, False, True], + operator.and_, + [True, False, False], + ), + ( + [True, False, True], + [True, False, np.nan], + operator.and_, + [True, False, False], + ), + ( + [True, False, np.nan], + [True, False, True], + operator.or_, + [True, False, False], + ), + ( + [True, False, True], + [True, False, np.nan], + operator.or_, + [True, False, True], + ), + ], + ) + def test_logical_operators_nans(self, left, right, op, expected, frame_or_series): + # GH#13896 + result = op(frame_or_series(left), frame_or_series(right)) + expected = frame_or_series(expected) + + tm.assert_equal(result, expected) + + def test_logical_ops_empty_frame(self): + # GH#5808 + # empty frames, non-mixed dtype + df = DataFrame(index=[1]) + + result = df & df + tm.assert_frame_equal(result, df) + + result = df | df + tm.assert_frame_equal(result, df) + + df2 = DataFrame(index=[1, 2]) + result = df & df2 + tm.assert_frame_equal(result, df2) + + dfa = DataFrame(index=[1], columns=["A"]) + + result = dfa & dfa + expected = DataFrame(False, index=[1], columns=["A"]) + tm.assert_frame_equal(result, expected) + + def test_logical_ops_bool_frame(self): + # GH#5808 + df1a_bool = DataFrame(True, index=[1], columns=["A"]) + + result = df1a_bool & df1a_bool + tm.assert_frame_equal(result, df1a_bool) + + result = df1a_bool | df1a_bool + 
tm.assert_frame_equal(result, df1a_bool) + + def test_logical_ops_int_frame(self): + # GH#5808 + df1a_int = DataFrame(1, index=[1], columns=["A"]) + df1a_bool = DataFrame(True, index=[1], columns=["A"]) + + result = df1a_int | df1a_bool + tm.assert_frame_equal(result, df1a_bool) + + # Check that this matches Series behavior + res_ser = df1a_int["A"] | df1a_bool["A"] + tm.assert_series_equal(res_ser, df1a_bool["A"]) + + def test_logical_ops_invalid(self, using_infer_string): + # GH#5808 + + df1 = DataFrame(1.0, index=[1], columns=["A"]) + df2 = DataFrame(True, index=[1], columns=["A"]) + msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'") + with pytest.raises(TypeError, match=msg): + df1 | df2 + + df1 = DataFrame("foo", index=[1], columns=["A"]) + df2 = DataFrame(True, index=[1], columns=["A"]) + msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'") + if using_infer_string: + import pyarrow as pa + + with pytest.raises(pa.lib.ArrowNotImplementedError, match="|has no kernel"): + df1 | df2 + else: + with pytest.raises(TypeError, match=msg): + df1 | df2 + + def test_logical_operators(self): + def _check_bin_op(op): + result = op(df1, df2) + expected = DataFrame( + op(df1.values, df2.values), index=df1.index, columns=df1.columns + ) + assert result.values.dtype == np.bool_ + tm.assert_frame_equal(result, expected) + + def _check_unary_op(op): + result = op(df1) + expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns) + assert result.values.dtype == np.bool_ + tm.assert_frame_equal(result, expected) + + df1 = { + "a": {"a": True, "b": False, "c": False, "d": True, "e": True}, + "b": {"a": False, "b": True, "c": False, "d": False, "e": False}, + "c": {"a": False, "b": False, "c": True, "d": False, "e": False}, + "d": {"a": True, "b": False, "c": False, "d": True, "e": True}, + "e": {"a": True, "b": False, "c": False, "d": True, "e": True}, + } + + df2 = { + "a": {"a": True, "b": False, "c": True, "d": False, "e": False}, + "b": {"a": False, "b": True, "c": False, "d": False, "e": False}, + "c": {"a": True, "b": False, "c": True, "d": False, "e": False}, + "d": {"a": False, "b": False, "c": False, "d": True, "e": False}, + "e": {"a": False, "b": False, "c": False, "d": False, "e": True}, + } + + df1 = DataFrame(df1) + df2 = DataFrame(df2) + + _check_bin_op(operator.and_) + _check_bin_op(operator.or_) + _check_bin_op(operator.xor) + + _check_unary_op(operator.inv) # TODO: belongs elsewhere + + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") + def test_logical_with_nas(self): + d = DataFrame({"a": [np.nan, False], "b": [True, True]}) + + # GH4947 + # bool comparisons should return bool + result = d["a"] | d["b"] + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + + # GH4604, automatic casting here + result = d["a"].fillna(False) | d["b"] + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = d["a"].fillna(False, downcast=False) | d["b"] + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + def test_logical_ops_categorical_columns(self): + # GH#38367 + intervals = [Interval(1, 2), Interval(3, 4)] + data = DataFrame( + [[1, np.nan], [2, np.nan]], + columns=CategoricalIndex( + intervals, categories=intervals + [Interval(5, 6)] + ), + ) + mask = DataFrame( + [[False, False], [False, False]], 
columns=data.columns, dtype=bool
+        )
+        result = mask | isnull(data)
+        expected = DataFrame(
+            [[False, True], [False, True]],
+            columns=CategoricalIndex(
+                intervals, categories=intervals + [Interval(5, 6)]
+            ),
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_int_dtype_different_index_not_bool(self):
+        # GH 52500
+        df1 = DataFrame([1, 2, 3], index=[10, 11, 23], columns=["a"])
+        df2 = DataFrame([10, 20, 30], index=[11, 10, 23], columns=["a"])
+        result = np.bitwise_xor(df1, df2)
+        expected = DataFrame([21, 8, 29], index=[10, 11, 23], columns=["a"])
+        tm.assert_frame_equal(result, expected)
+
+        result = df1 ^ df2
+        tm.assert_frame_equal(result, expected)
+
+    def test_different_dtypes_different_index_raises(self):
+        # GH 52538
+        df1 = DataFrame([1, 2], index=["a", "b"])
+        df2 = DataFrame([3, 4], index=["b", "c"])
+        with pytest.raises(TypeError, match="unsupported operand type"):
+            df1 & df2
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_nonunique_indexes.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_nonunique_indexes.py
new file mode 100644
index 0000000000000000000000000000000000000000..34f172e900ab7e16d66afe32aed3d5b87be301c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_nonunique_indexes.py
@@ -0,0 +1,337 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Series,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameNonuniqueIndexes:
+    def test_setattr_columns_vs_construct_with_columns(self):
+        # assignment
+        # GH 3687
+        arr = np.random.default_rng(2).standard_normal((3, 2))
+        idx = list(range(2))
+        df = DataFrame(arr, columns=["A", "A"])
+        df.columns = idx
+        expected = DataFrame(arr, columns=idx)
+        tm.assert_frame_equal(df, expected)
+
+    def test_setattr_columns_vs_construct_with_columns_datetimeindex(self):
+        idx = date_range("20130101", periods=4, freq="QE-NOV")
+        df = DataFrame(
+            [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
+        )
+        df.columns = idx
+        expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
+        tm.assert_frame_equal(df, expected)
+
+    def test_insert_with_duplicate_columns(self):
+        # insert
+        df = DataFrame(
+            [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
+            columns=["foo", "bar", "foo", "hello"],
+        )
+        df["string"] = "bah"
+        expected = DataFrame(
+            [[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
+            columns=["foo", "bar", "foo", "hello", "string"],
+        )
+        tm.assert_frame_equal(df, expected)
+        with pytest.raises(ValueError, match="Length of value"):
+            df.insert(0, "AnotherColumn", range(len(df.index) - 1))
+
+        # insert same dtype
+        df["foo2"] = 3
+        expected = DataFrame(
+            [[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
+            columns=["foo", "bar", "foo", "hello", "string", "foo2"],
+        )
+        tm.assert_frame_equal(df, expected)
+
+        # set (non-dup)
+        df["foo2"] = 4
+        expected = DataFrame(
+            [[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
+            columns=["foo", "bar", "foo", "hello", "string", "foo2"],
+        )
+        tm.assert_frame_equal(df, expected)
+        df["foo2"] = 3
+
+        # delete (non dup)
+        del df["bar"]
+        expected = DataFrame(
+            [[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
+            columns=["foo", "foo", "hello", "string", "foo2"],
+        )
+        tm.assert_frame_equal(df, expected)
+
+        # try to delete again (it's not consolidated)
+        del df["hello"]
+        expected = DataFrame(
+            [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
+
columns=["foo", "foo", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + # consolidate + df = df._consolidate() + expected = DataFrame( + [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]], + columns=["foo", "foo", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + # insert + df.insert(2, "new_col", 5.0) + expected = DataFrame( + [[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]], + columns=["foo", "foo", "new_col", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + # insert a dup + with pytest.raises(ValueError, match="cannot insert"): + df.insert(2, "new_col", 4.0) + + df.insert(2, "new_col", 4.0, allow_duplicates=True) + expected = DataFrame( + [ + [1, 1, 4.0, 5.0, "bah", 3], + [1, 2, 4.0, 5.0, "bah", 3], + [2, 3, 4.0, 5.0, "bah", 3], + ], + columns=["foo", "foo", "new_col", "new_col", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + # delete (dup) + del df["foo"] + expected = DataFrame( + [[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]], + columns=["new_col", "new_col", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + def test_dup_across_dtypes(self): + # dup across dtypes + df = DataFrame( + [[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]], + columns=["foo", "bar", "foo", "hello"], + ) + + df["foo2"] = 7.0 + expected = DataFrame( + [[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]], + columns=["foo", "bar", "foo", "hello", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + result = df["foo"] + expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"]) + tm.assert_frame_equal(result, expected) + + # multiple replacements + df["foo"] = "string" + expected = DataFrame( + [ + ["string", 1, "string", 5, 7.0], + ["string", 1, "string", 5, 7.0], + ["string", 1, "string", 5, 7.0], + ], + columns=["foo", "bar", "foo", "hello", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + del df["foo"] + expected = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"] + ) + tm.assert_frame_equal(df, expected) + + def test_column_dups_indexes(self): + # check column dups with index equal and not equal to df's index + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "A"], + ) + for index in [df.index, pd.Index(list("edcba"))]: + this_df = df.copy() + expected_ser = Series(index.values, index=this_df.index) + expected_df = DataFrame( + {"A": expected_ser, "B": this_df["B"]}, + columns=["A", "B", "A"], + ) + this_df["A"] = index + tm.assert_frame_equal(this_df, expected_df) + + def test_changing_dtypes_with_duplicate_columns(self): + # multiple assignments that change dtypes + # the location indexer is a slice + # GH 6120 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["that", "that"] + ) + expected = DataFrame(1.0, index=range(5), columns=["that", "that"]) + + df["that"] = 1.0 + tm.assert_frame_equal(df, expected) + + df = DataFrame( + np.random.default_rng(2).random((5, 2)), columns=["that", "that"] + ) + expected = DataFrame(1, index=range(5), columns=["that", "that"]) + + df["that"] = 1 + tm.assert_frame_equal(df, expected) + + def test_dup_columns_comparisons(self): + # equality + df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"]) + df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"]) + + # not-comparing like-labelled + msg = ( + r"Can only compare identically-labeled 
\(both index and columns\) " + "DataFrame objects" + ) + with pytest.raises(ValueError, match=msg): + df1 == df2 + + df1r = df1.reindex_like(df2) + result = df1r == df2 + expected = DataFrame( + [[False, True], [True, False], [False, False], [True, False]], + columns=["A", "A"], + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_column_selection(self): + # mixed column selection + # GH 5639 + dfbool = DataFrame( + { + "one": Series([True, True, False], index=["a", "b", "c"]), + "two": Series([False, False, True, False], index=["a", "b", "c", "d"]), + "three": Series([False, True, True, True], index=["a", "b", "c", "d"]), + } + ) + expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1) + result = dfbool[["one", "three", "one"]] + tm.assert_frame_equal(result, expected) + + def test_multi_axis_dups(self): + # multi-axis dups + # GH 6121 + df = DataFrame( + np.arange(25.0).reshape(5, 5), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "C", "D", "E"], + ) + z = df[["A", "C", "A"]].copy() + expected = z.loc[["a", "c", "a"]] + + df = DataFrame( + np.arange(25.0).reshape(5, 5), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "C", "D", "E"], + ) + z = df[["A", "C", "A"]] + result = z.loc[["a", "c", "a"]] + tm.assert_frame_equal(result, expected) + + def test_columns_with_dups(self): + # GH 3468 related + + # basic + df = DataFrame([[1, 2]], columns=["a", "a"]) + df.columns = ["a", "a.1"] + expected = DataFrame([[1, 2]], columns=["a", "a.1"]) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"]) + df.columns = ["b", "a", "a.1"] + expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"]) + tm.assert_frame_equal(df, expected) + + def test_columns_with_dup_index(self): + # with a dup index + df = DataFrame([[1, 2]], columns=["a", "a"]) + df.columns = ["b", "b"] + expected = DataFrame([[1, 2]], columns=["b", "b"]) + tm.assert_frame_equal(df, expected) + + def test_multi_dtype(self): + # multi-dtype + df = DataFrame( + [[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], + columns=["a", "a", "b", "b", "d", "c", "c"], + ) + df.columns = list("ABCDEFG") + expected = DataFrame( + [[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG") + ) + tm.assert_frame_equal(df, expected) + + def test_multi_dtype2(self): + df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"]) + df.columns = ["a", "a.1", "a.2", "a.3"] + expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"]) + tm.assert_frame_equal(df, expected) + + def test_dups_across_blocks(self, using_array_manager): + # dups across blocks + df_float = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), dtype="float64" + ) + df_int = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)).astype("int64") + ) + df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns) + df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns) + df_dt = DataFrame( + pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns + ) + df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1) + + if not using_array_manager: + assert len(df._mgr.blknos) == len(df.columns) + assert len(df._mgr.blklocs) == len(df.columns) + + # testing iloc + for i in range(len(df.columns)): + df.iloc[:, i] + + def test_dup_columns_across_dtype(self): + # dup columns across dtype GH 2079/2194 + vals = [[1, -1, 2.0], [2, -2, 3.0]] + rs = DataFrame(vals, columns=["A", "A", "B"]) + xp = DataFrame(vals) + 
xp.columns = ["A", "A", "B"] + tm.assert_frame_equal(rs, xp) + + def test_set_value_by_index(self): + # See gh-12344 + warn = None + msg = "will attempt to set the values inplace" + + df = DataFrame(np.arange(9).reshape(3, 3).T) + df.columns = list("AAA") + expected = df.iloc[:, 2].copy() + + with tm.assert_produces_warning(warn, match=msg): + df.iloc[:, 0] = 3 + tm.assert_series_equal(df.iloc[:, 2], expected) + + df = DataFrame(np.arange(9).reshape(3, 3).T) + df.columns = [2, float(2), str(2)] + expected = df.iloc[:, 1].copy() + + with tm.assert_produces_warning(warn, match=msg): + df.iloc[:, 0] = 3 + tm.assert_series_equal(df.iloc[:, 1], expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_npfuncs.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_npfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..afb53bf2de93aa591ca9d7b99af185bc0c4083ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_npfuncs.py @@ -0,0 +1,89 @@ +""" +Tests for np.foo applied to DataFrame, not necessarily ufuncs. +""" +import numpy as np + +from pandas import ( + Categorical, + DataFrame, +) +import pandas._testing as tm + + +class TestAsArray: + def test_asarray_homogeneous(self): + df = DataFrame({"A": Categorical([1, 2]), "B": Categorical([1, 2])}) + result = np.asarray(df) + # may change from object in the future + expected = np.array([[1, 1], [2, 2]], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_np_sqrt(self, float_frame): + with np.errstate(all="ignore"): + result = np.sqrt(float_frame) + assert isinstance(result, type(float_frame)) + assert result.index.is_(float_frame.index) + assert result.columns.is_(float_frame.columns) + + tm.assert_frame_equal(result, float_frame.apply(np.sqrt)) + + def test_sum_deprecated_axis_behavior(self): + # GH#52042 deprecated behavior of df.sum(axis=None), which gets + # called when we do np.sum(df) + + arr = np.random.default_rng(2).standard_normal((4, 3)) + df = DataFrame(arr) + + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + res = np.sum(df) + + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.sum(axis=None) + tm.assert_series_equal(res, expected) + + def test_np_ravel(self): + # GH26247 + arr = np.array( + [ + [0.11197053, 0.44361564, -0.92589452], + [0.05883648, -0.00948922, -0.26469934], + ] + ) + + result = np.ravel([DataFrame(batch.reshape(1, 3)) for batch in arr]) + expected = np.array( + [ + 0.11197053, + 0.44361564, + -0.92589452, + 0.05883648, + -0.00948922, + -0.26469934, + ] + ) + tm.assert_numpy_array_equal(result, expected) + + result = np.ravel(DataFrame(arr[0].reshape(1, 3), columns=["x1", "x2", "x3"])) + expected = np.array([0.11197053, 0.44361564, -0.92589452]) + tm.assert_numpy_array_equal(result, expected) + + result = np.ravel( + [ + DataFrame(batch.reshape(1, 3), columns=["x1", "x2", "x3"]) + for batch in arr + ] + ) + expected = np.array( + [ + 0.11197053, + 0.44361564, + -0.92589452, + 0.05883648, + -0.00948922, + -0.26469934, + ] + ) + tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_query_eval.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_query_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..2c807c72582c52d748d043263a736b5c9dbdfffa --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_query_eval.py @@ -0,0 +1,1425 @@ +import operator + +import numpy as np +import pytest + +from pandas.errors import ( + NumExprClobberingError, + UndefinedVariableError, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm +from pandas.core.computation.check import NUMEXPR_INSTALLED + + +@pytest.fixture(params=["python", "pandas"], ids=lambda x: x) +def parser(request): + return request.param + + +@pytest.fixture( + params=["python", pytest.param("numexpr", marks=td.skip_if_no("numexpr"))], + ids=lambda x: x, +) +def engine(request): + return request.param + + +def skip_if_no_pandas_parser(parser): + if parser != "pandas": + pytest.skip(f"cannot evaluate with parser={parser}") + + +class TestCompat: + @pytest.fixture + def df(self): + return DataFrame({"A": [1, 2, 3]}) + + @pytest.fixture + def expected1(self, df): + return df[df.A > 0] + + @pytest.fixture + def expected2(self, df): + return df.A + 1 + + def test_query_default(self, df, expected1, expected2): + # GH 12749 + # this should always work, whether NUMEXPR_INSTALLED or not + result = df.query("A>0") + tm.assert_frame_equal(result, expected1) + result = df.eval("A+1") + tm.assert_series_equal(result, expected2, check_names=False) + + def test_query_None(self, df, expected1, expected2): + result = df.query("A>0", engine=None) + tm.assert_frame_equal(result, expected1) + result = df.eval("A+1", engine=None) + tm.assert_series_equal(result, expected2, check_names=False) + + def test_query_python(self, df, expected1, expected2): + result = df.query("A>0", engine="python") + tm.assert_frame_equal(result, expected1) + result = df.eval("A+1", engine="python") + tm.assert_series_equal(result, expected2, check_names=False) + + def test_query_numexpr(self, df, expected1, expected2): + if NUMEXPR_INSTALLED: + result = df.query("A>0", engine="numexpr") + tm.assert_frame_equal(result, expected1) + result = df.eval("A+1", engine="numexpr") + tm.assert_series_equal(result, expected2, check_names=False) + else: + msg = ( + r"'numexpr' is not installed or an unsupported version. 
" + r"Cannot use engine='numexpr' for query/eval if 'numexpr' is " + r"not installed" + ) + with pytest.raises(ImportError, match=msg): + df.query("A>0", engine="numexpr") + with pytest.raises(ImportError, match=msg): + df.eval("A+1", engine="numexpr") + + +class TestDataFrameEval: + # smaller hits python, larger hits numexpr + @pytest.mark.parametrize("n", [4, 4000]) + @pytest.mark.parametrize( + "op_str,op,rop", + [ + ("+", "__add__", "__radd__"), + ("-", "__sub__", "__rsub__"), + ("*", "__mul__", "__rmul__"), + ("/", "__truediv__", "__rtruediv__"), + ], + ) + def test_ops(self, op_str, op, rop, n): + # tst ops and reversed ops in evaluation + # GH7198 + + df = DataFrame(1, index=range(n), columns=list("abcd")) + df.iloc[0] = 2 + m = df.mean() + + base = DataFrame( # noqa: F841 + np.tile(m.values, n).reshape(n, -1), columns=list("abcd") + ) + + expected = eval(f"base {op_str} df") + + # ops as strings + result = eval(f"m {op_str} df") + tm.assert_frame_equal(result, expected) + + # these are commutative + if op in ["+", "*"]: + result = getattr(df, op)(m) + tm.assert_frame_equal(result, expected) + + # these are not + elif op in ["-", "/"]: + result = getattr(df, rop)(m) + tm.assert_frame_equal(result, expected) + + def test_dataframe_sub_numexpr_path(self): + # GH7192: Note we need a large number of rows to ensure this + # goes through the numexpr path + df = DataFrame({"A": np.random.default_rng(2).standard_normal(25000)}) + df.iloc[0:5] = np.nan + expected = 1 - np.isnan(df.iloc[0:25]) + result = (1 - np.isnan(df)).iloc[0:25] + tm.assert_frame_equal(result, expected) + + def test_query_non_str(self): + # GH 11485 + df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]}) + + msg = "expr must be a string to be evaluated" + with pytest.raises(ValueError, match=msg): + df.query(lambda x: x.B == "b") + + with pytest.raises(ValueError, match=msg): + df.query(111) + + def test_query_empty_string(self): + # GH 13139 + df = DataFrame({"A": [1, 2, 3]}) + + msg = "expr cannot be an empty string" + with pytest.raises(ValueError, match=msg): + df.query("") + + def test_eval_resolvers_as_list(self): + # GH 14095 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab") + ) + dict1 = {"a": 1} + dict2 = {"b": 2} + assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + + def test_eval_resolvers_combined(self): + # GH 34966 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab") + ) + dict1 = {"c": 2} + + # Both input and default index/column resolvers should be usable + result = df.eval("a + b * c", resolvers=[dict1]) + + expected = df["a"] + df["b"] * dict1["c"] + tm.assert_series_equal(result, expected) + + def test_eval_object_dtype_binop(self): + # GH#24883 + df = DataFrame({"a1": ["Y", "N"]}) + res = df.eval("c = ((a1 == 'Y') & True)") + expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]}) + tm.assert_frame_equal(res, expected) + + +class TestDataFrameQueryWithMultiIndex: + def test_query_with_named_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.random.default_rng(2).choice(["eggs", "ham"], size=10) + index = MultiIndex.from_arrays([a, b], names=["color", "food"]) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + ind = Series( + df.index.get_level_values("color").values, index=index, name="color" 
+ ) + + # equality + res1 = df.query('color == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == color', parser=parser, engine=engine) + exp = df[ind == "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('color != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != color', parser=parser, engine=engine) + exp = df[ind != "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('color == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == color', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('color != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != color', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in color', parser=parser, engine=engine) + res2 = df.query('"red" in color', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in color', parser=parser, engine=engine) + res2 = df.query('"red" not in color', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + def test_query_with_unnamed_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.random.default_rng(2).choice(["eggs", "ham"], size=10) + index = MultiIndex.from_arrays([a, b]) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + ind = Series(df.index.get_level_values(0).values, index=index) + + res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine) + exp = df[ind == "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine) + exp = df[ind != "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # ## LEVEL 1 + ind = Series(df.index.get_level_values(1).values, index=index) + res1 = 
df.query('ilevel_1 == "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine) + exp = df[ind == "eggs"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine) + exp = df[ind != "eggs"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + def test_query_with_partially_named_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.arange(10) + index = MultiIndex.from_arrays([a, b]) + index.names = [None, "rating"] + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + res = df.query("rating == 1", parser=parser, engine=engine) + ind = Series( + df.index.get_level_values("rating").values, index=index, name="rating" + ) + exp = df[ind == 1] + tm.assert_frame_equal(res, exp) + + res = df.query("rating != 1", parser=parser, engine=engine) + ind = Series( + df.index.get_level_values("rating").values, index=index, name="rating" + ) + exp = df[ind != 1] + tm.assert_frame_equal(res, exp) + + res = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind == "red"] + tm.assert_frame_equal(res, exp) + + res = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind != "red"] + tm.assert_frame_equal(res, exp) + + def test_query_multiindex_get_index_resolvers(self): + df = DataFrame( + np.ones((10, 3)), + index=MultiIndex.from_arrays( + [range(10) for _ in range(2)], names=["spam", "eggs"] + ), + ) + resolvers = df._get_index_resolvers() + + def to_series(mi, level): + level_values = mi.get_level_values(level) + s = level_values.to_series() + s.index = mi + return s + + col_series = df.columns.to_series() + expected = { + "index": df.index, + "columns": col_series, + "spam": to_series(df.index, "spam"), + "eggs": to_series(df.index, "eggs"), + "clevel_0": col_series, + } + for k, v in resolvers.items(): + if isinstance(v, Index): + assert v.is_(expected[k]) + elif isinstance(v, Series): + tm.assert_series_equal(v, expected[k]) + else: + raise AssertionError("object must be a Series or Index") + + +@td.skip_if_no("numexpr") +class 
TestDataFrameQueryNumExprPandas: + @pytest.fixture + def engine(self): + return "numexpr" + + @pytest.fixture + def parser(self): + return "pandas" + + def test_date_query_with_attribute_access(self, engine, parser): + skip_if_no_pandas_parser(parser) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query( + "@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_no_attribute_access(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_NaT(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates2"] = date_range("1/1/2013", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates3"] = pd.NaT + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT(self, engine, parser): + n = 10 + # Cast to object to avoid implicit cast when setting entry to pd.NaT below + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype( + {0: object} + ) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.iloc[0, 0] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT_duplicates(self, engine, parser): + n = 10 + d = {} + d["dates1"] = date_range("1/1/2012", periods=n) + d["dates3"] = date_range("1/1/2014", periods=n) + df = DataFrame(d) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_non_date(self, engine, parser): + n = 10 + df = DataFrame( + {"dates": 
date_range("1/1/2012", periods=n), "nondate": np.arange(n)} + ) + + result = df.query("dates == nondate", parser=parser, engine=engine) + assert len(result) == 0 + + result = df.query("dates != nondate", parser=parser, engine=engine) + tm.assert_frame_equal(result, df) + + msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray" + for op in ["<", ">", "<=", ">="]: + with pytest.raises(TypeError, match=msg): + df.query(f"dates {op} nondate", parser=parser, engine=engine) + + def test_query_syntax_error(self, engine, parser): + df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)}) + msg = "invalid syntax" + with pytest.raises(SyntaxError, match=msg): + df.query("i - +", engine=engine, parser=parser) + + def test_query_scope(self, engine, parser): + skip_if_no_pandas_parser(parser) + + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 2)), columns=list("ab") + ) + + a, b = 1, 2 # noqa: F841 + res = df.query("a > b", engine=engine, parser=parser) + expected = df[df.a > df.b] + tm.assert_frame_equal(res, expected) + + res = df.query("@a > b", engine=engine, parser=parser) + expected = df[a > df.b] + tm.assert_frame_equal(res, expected) + + # no local variable c + with pytest.raises( + UndefinedVariableError, match="local variable 'c' is not defined" + ): + df.query("@a > b > @c", engine=engine, parser=parser) + + # no column named 'c' + with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"): + df.query("@a > b > c", engine=engine, parser=parser) + + def test_query_doesnt_pickup_local(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + # we don't pick up the local 'sin' + with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"): + df.query("sin > 5", engine=engine, parser=parser) + + def test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + msg = "Variables in expression.+" + with pytest.raises(NumExprClobberingError, match=msg): + df.query("sin > 5", engine=engine, parser=parser) + + def test_query(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + + tm.assert_frame_equal( + df.query("a < b", engine=engine, parser=parser), df[df.a < df.b] + ) + tm.assert_frame_equal( + df.query("a + b > b * c", engine=engine, parser=parser), + df[df.a + df.b > df.b * df.c], + ) + + def test_query_index_with_name(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).integers(10, size=(10, 3)), + index=Index(range(10), name="blob"), + columns=["a", "b", "c"], + ) + res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser) + expec = df[(df.index < 5) & (df.a < df.b)] + tm.assert_frame_equal(res, expec) + + res = df.query("blob < b", engine=engine, parser=parser) + expec = df[df.index < df.b] + + tm.assert_frame_equal(res, expec) + + def test_query_index_without_name(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).integers(10, size=(10, 3)), + index=range(10), + columns=["a", "b", "c"], + ) + + # "index" should refer to the index + res = df.query("index < b", engine=engine, parser=parser) + expec = df[df.index < df.b] + tm.assert_frame_equal(res, expec) + + # test against a scalar + res = df.query("index < 5", engine=engine, parser=parser) + expec = df[df.index < 5] + tm.assert_frame_equal(res, expec) + + def 
test_nested_scope(self, engine, parser):
+        skip_if_no_pandas_parser(parser)
+
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        expected = df[(df > 0) & (df2 > 0)]
+
+        result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expected)
+
+        result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expected)
+
+        result = pd.eval(
+            "df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser
+        )
+        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
+        tm.assert_frame_equal(result, expected)
+
+        result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)
+        expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expected)
+
+    def test_nested_raises_on_local_self_reference(self, engine, parser):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+
+        # can't reference ourself b/c we're a local so @ is necessary
+        with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
+            df.query("df > 0", engine=engine, parser=parser)
+
+    def test_local_syntax(self, engine, parser):
+        skip_if_no_pandas_parser(parser)
+
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((100, 10)),
+            columns=list("abcdefghij"),
+        )
+        b = 1
+        expect = df[df.a < b]
+        result = df.query("a < @b", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expect)
+
+        expect = df[df.a < df.b]
+        result = df.query("a < b", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expect)
+
+    def test_chained_cmp_and_in(self, engine, parser):
+        skip_if_no_pandas_parser(parser)
+        cols = list("abc")
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((100, len(cols))), columns=cols
+        )
+        res = df.query(
+            "a < b < c and a not in b not in c", engine=engine, parser=parser
+        )
+        ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
+        expec = df[ind]
+        tm.assert_frame_equal(res, expec)
+
+    def test_local_variable_with_in(self, engine, parser):
+        skip_if_no_pandas_parser(parser)
+        a = Series(np.random.default_rng(2).integers(3, size=15), name="a")
+        b = Series(np.random.default_rng(2).integers(10, size=15), name="b")
+        df = DataFrame({"a": a, "b": b})
+
+        expected = df.loc[(df.b - 1).isin(a)]
+        result = df.query("b - 1 in a", engine=engine, parser=parser)
+        tm.assert_frame_equal(expected, result)
+
+        b = Series(np.random.default_rng(2).integers(10, size=15), name="b")
+        expected = df.loc[(b - 1).isin(a)]
+        result = df.query("@b - 1 in a", engine=engine, parser=parser)
+        tm.assert_frame_equal(expected, result)
+
+    def test_at_inside_string(self, engine, parser):
+        skip_if_no_pandas_parser(parser)
+        c = 1  # noqa: F841
+        df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
+        result = df.query('a == "@c"', engine=engine, parser=parser)
+        expected = df[df.a == "@c"]
+        tm.assert_frame_equal(result, expected)
+
+    def test_query_undefined_local(self, engine, parser):
+        # take engine/parser from the fixtures; self.engine/self.parser would
+        # be the fixture functions themselves, not their values
+        skip_if_no_pandas_parser(parser)
+
+        df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=list("ab"))
+        with pytest.raises(
+            UndefinedVariableError, match="local variable 'c' is not defined"
+        ):
+            df.query("a == @c", engine=engine, parser=parser)
+
+    def test_index_resolvers_come_after_columns_with_the_same_name(
+        self, engine, parser
+    ):
+        n = 1  # noqa: F841
+        a = np.r_[20:101:20]
+
+        df = DataFrame(
{"index": a, "b": np.random.default_rng(2).standard_normal(a.size)} + ) + df.index.name = "index" + result = df.query("index > 5", engine=engine, parser=parser) + expected = df[df["index"] > 5] + tm.assert_frame_equal(result, expected) + + df = DataFrame( + {"index": a, "b": np.random.default_rng(2).standard_normal(a.size)} + ) + result = df.query("ilevel_0 > 5", engine=engine, parser=parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + + df = DataFrame({"a": a, "b": np.random.default_rng(2).standard_normal(a.size)}) + df.index.name = "a" + result = df.query("a > 5", engine=engine, parser=parser) + expected = df[df.a > 5] + tm.assert_frame_equal(result, expected) + + result = df.query("index > 5", engine=engine, parser=parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op, f", [["==", operator.eq], ["!=", operator.ne]]) + def test_inf(self, op, f, engine, parser): + n = 10 + df = DataFrame( + { + "a": np.random.default_rng(2).random(n), + "b": np.random.default_rng(2).random(n), + } + ) + df.loc[::2, 0] = np.inf + q = f"a {op} inf" + expected = df[f(df.a, np.inf)] + result = df.query(q, engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + def test_check_tz_aware_index_query(self, tz_aware_fixture): + # https://github.com/pandas-dev/pandas/issues/29463 + tz = tz_aware_fixture + df_index = date_range( + start="2019-01-01", freq="1d", periods=10, tz=tz, name="time" + ) + expected = DataFrame(index=df_index) + df = DataFrame(index=df_index) + result = df.query('"2018-01-03 00:00:00+00" < time') + tm.assert_frame_equal(result, expected) + + expected = DataFrame(df_index) + result = df.reset_index().query('"2018-01-03 00:00:00+00" < time') + tm.assert_frame_equal(result, expected) + + def test_method_calls_in_query(self, engine, parser): + # https://github.com/pandas-dev/pandas/issues/22435 + n = 10 + df = DataFrame( + { + "a": 2 * np.random.default_rng(2).random(n), + "b": np.random.default_rng(2).random(n), + } + ) + expected = df[df["a"].astype("int") == 0] + result = df.query("a.astype('int') == 0", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "a": np.where( + np.random.default_rng(2).random(n) < 0.5, + np.nan, + np.random.default_rng(2).standard_normal(n), + ), + "b": np.random.default_rng(2).standard_normal(n), + } + ) + expected = df[df["a"].notnull()] + result = df.query("a.notnull()", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numexpr") +class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas): + @pytest.fixture + def engine(self): + return "numexpr" + + @pytest.fixture + def parser(self): + return "python" + + def test_date_query_no_attribute_access(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query( + "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_NaT(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates2"] = date_range("1/1/2013", periods=n) + df["dates3"] = 
date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates3"] = pd.NaT + res = df.query( + "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query( + "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT(self, engine, parser): + n = 10 + # Cast to object to avoid implicit cast when setting entry to pd.NaT below + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype( + {0: object} + ) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.iloc[0, 0] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query( + "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT_duplicates(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + msg = r"'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("index < 20130101 < dates3", engine=engine, parser=parser) + + def test_nested_scope(self, engine, parser): + # smoke test + x = 1 # noqa: F841 + result = pd.eval("x + 1", engine=engine, parser=parser) + assert result == 2 + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + + # don't have the pandas parser + msg = r"The '@' prefix is only supported by the pandas parser" + with pytest.raises(SyntaxError, match=msg): + df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser) + + with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"): + df.query("(df>0) & (df2>0)", engine=engine, parser=parser) + + expected = df[(df > 0) & (df2 > 0)] + result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)] + result = pd.eval( + "df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser + ) + tm.assert_frame_equal(expected, result) + + def test_query_numexpr_with_min_and_max_columns(self): + df = DataFrame({"min": [1, 2, 3], "max": [4, 5, 6]}) + regex_to_match = ( + r"Variables in expression \"\(min\) == \(1\)\" " + r"overlap with builtins: \('min'\)" + ) + with pytest.raises(NumExprClobberingError, match=regex_to_match): + df.query("min == 1") + + regex_to_match = ( + r"Variables in expression \"\(max\) == \(1\)\" " + 
r"overlap with builtins: \('max'\)" + ) + with pytest.raises(NumExprClobberingError, match=regex_to_match): + df.query("max == 1") + + +class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas): + @pytest.fixture + def engine(self): + return "python" + + @pytest.fixture + def parser(self): + return "pandas" + + def test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + expected = df[df.index > 5] + result = df.query("sin > 5", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + +class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython): + @pytest.fixture + def engine(self): + return "python" + + @pytest.fixture + def parser(self): + return "python" + + def test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + expected = df[df.index > 5] + result = df.query("sin > 5", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + +class TestDataFrameQueryStrings: + def test_str_query_method(self, parser, engine): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"]) + df["strings"] = Series(list("aabbccddee")) + expect = df[df.strings == "a"] + + if parser != "pandas": + col = "strings" + lst = '"a"' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = "==", "!=" + ops = 2 * ([eq] + [ne]) + msg = r"'(Not)?In' nodes are not implemented" + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = f"{lhs} {op} {rhs}" + with pytest.raises(NotImplementedError, match=msg): + df.query( + ex, + engine=engine, + parser=parser, + local_dict={"strings": df.strings}, + ) + else: + res = df.query('"a" == strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('strings == "a"', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + tm.assert_frame_equal(res, df[df.strings.isin(["a"])]) + + expect = df[df.strings != "a"] + res = df.query('strings != "a"', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('"a" != strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + tm.assert_frame_equal(res, df[~df.strings.isin(["a"])]) + + def test_str_list_query_method(self, parser, engine): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"]) + df["strings"] = Series(list("aabbccddee")) + expect = df[df.strings.isin(["a", "b"])] + + if parser != "pandas": + col = "strings" + lst = '["a", "b"]' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = "==", "!=" + ops = 2 * ([eq] + [ne]) + msg = r"'(Not)?In' nodes are not implemented" + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = f"{lhs} {op} {rhs}" + with pytest.raises(NotImplementedError, match=msg): + df.query(ex, engine=engine, parser=parser) + else: + res = df.query('strings == ["a", "b"]', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('["a", "b"] == strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + expect = df[~df.strings.isin(["a", "b"])] + + res = df.query('strings != ["a", "b"]', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('["a", "b"] != strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + def test_query_with_string_columns(self, parser, 
engine): + df = DataFrame( + { + "a": list("aaaabbbbcccc"), + "b": list("aabbccddeeff"), + "c": np.random.default_rng(2).integers(5, size=12), + "d": np.random.default_rng(2).integers(9, size=12), + } + ) + if parser == "pandas": + res = df.query("a in b", parser=parser, engine=engine) + expec = df[df.a.isin(df.b)] + tm.assert_frame_equal(res, expec) + + res = df.query("a in b and c < d", parser=parser, engine=engine) + expec = df[df.a.isin(df.b) & (df.c < df.d)] + tm.assert_frame_equal(res, expec) + else: + msg = r"'(Not)?In' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("a in b", parser=parser, engine=engine) + + msg = r"'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("a in b and c < d", parser=parser, engine=engine) + + def test_object_array_eq_ne(self, parser, engine, using_infer_string): + df = DataFrame( + { + "a": list("aaaabbbbcccc"), + "b": list("aabbccddeeff"), + "c": np.random.default_rng(2).integers(5, size=12), + "d": np.random.default_rng(2).integers(9, size=12), + } + ) + warning = RuntimeWarning if using_infer_string and engine == "numexpr" else None + with tm.assert_produces_warning(warning): + res = df.query("a == b", parser=parser, engine=engine) + exp = df[df.a == df.b] + tm.assert_frame_equal(res, exp) + + with tm.assert_produces_warning(warning): + res = df.query("a != b", parser=parser, engine=engine) + exp = df[df.a != df.b] + tm.assert_frame_equal(res, exp) + + def test_query_with_nested_strings(self, parser, engine): + skip_if_no_pandas_parser(parser) + events = [ + f"page {n} {act}" for n in range(1, 4) for act in ["load", "exit"] + ] * 2 + stamps1 = date_range("2014-01-01 0:00:01", freq="30s", periods=6) + stamps2 = date_range("2014-02-01 1:00:01", freq="30s", periods=6) + df = DataFrame( + { + "id": np.arange(1, 7).repeat(2), + "event": events, + "timestamp": stamps1.append(stamps2), + } + ) + + expected = df[df.event == '"page 1 load"'] + res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine) + tm.assert_frame_equal(expected, res) + + def test_query_with_nested_special_character(self, parser, engine): + skip_if_no_pandas_parser(parser) + df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]}) + res = df.query('a == "test & test"', parser=parser, engine=engine) + expec = df[df.a == "test & test"] + tm.assert_frame_equal(res, expec) + + @pytest.mark.parametrize( + "op, func", + [ + ["<", operator.lt], + [">", operator.gt], + ["<=", operator.le], + [">=", operator.ge], + ], + ) + def test_query_lex_compare_strings( + self, parser, engine, op, func, using_infer_string + ): + a = Series(np.random.default_rng(2).choice(list("abcde"), 20)) + b = Series(np.arange(a.size)) + df = DataFrame({"X": a, "Y": b}) + + warning = RuntimeWarning if using_infer_string and engine == "numexpr" else None + with tm.assert_produces_warning(warning): + res = df.query(f'X {op} "d"', engine=engine, parser=parser) + expected = df[func(df.X, "d")] + tm.assert_frame_equal(res, expected) + + def test_query_single_element_booleans(self, parser, engine): + columns = "bid", "bidsize", "ask", "asksize" + data = np.random.default_rng(2).integers(2, size=(1, len(columns))).astype(bool) + df = DataFrame(data, columns=columns) + res = df.query("bid & ask", engine=engine, parser=parser) + expected = df[df.bid & df.ask] + tm.assert_frame_equal(res, expected) + + def test_query_string_scalar_variable(self, parser, engine): + skip_if_no_pandas_parser(parser) + df = DataFrame( 
+ { + "Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"], + "Price": [109.70, 109.72, 183.30, 183.35], + } + ) + e = df[df.Symbol == "BUD US"] + symb = "BUD US" # noqa: F841 + r = df.query("Symbol == @symb", parser=parser, engine=engine) + tm.assert_frame_equal(e, r) + + @pytest.mark.parametrize( + "in_list", + [ + [None, "asdf", "ghjk"], + ["asdf", None, "ghjk"], + ["asdf", "ghjk", None], + [None, None, "asdf"], + ["asdf", None, None], + [None, None, None], + ], + ) + def test_query_string_null_elements(self, in_list): + # GITHUB ISSUE #31516 + parser = "pandas" + engine = "python" + expected = {i: value for i, value in enumerate(in_list) if value == "asdf"} + + df_expected = DataFrame({"a": expected}, dtype="string") + df_expected.index = df_expected.index.astype("int64") + df = DataFrame({"a": in_list}, dtype="string") + res1 = df.query("a == 'asdf'", parser=parser, engine=engine) + res2 = df[df["a"] == "asdf"] + res3 = df.query("a <= 'asdf'", parser=parser, engine=engine) + tm.assert_frame_equal(res1, df_expected) + tm.assert_frame_equal(res1, res2) + tm.assert_frame_equal(res1, res3) + tm.assert_frame_equal(res2, res3) + + +class TestDataFrameEvalWithFrame: + @pytest.fixture + def frame(self): + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=list("abc") + ) + + def test_simple_expr(self, frame, parser, engine): + res = frame.eval("a + b", engine=engine, parser=parser) + expect = frame.a + frame.b + tm.assert_series_equal(res, expect) + + def test_bool_arith_expr(self, frame, parser, engine): + res = frame.eval("a[a < 1] + b", engine=engine, parser=parser) + expect = frame.a[frame.a < 1] + frame.b + tm.assert_series_equal(res, expect) + + @pytest.mark.parametrize("op", ["+", "-", "*", "/"]) + def test_invalid_type_for_operator_raises(self, parser, engine, op): + df = DataFrame({"a": [1, 2], "b": ["c", "d"]}) + msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'|Cannot" + + with pytest.raises(TypeError, match=msg): + df.eval(f"a {op} b", engine=engine, parser=parser) + + +class TestDataFrameQueryBacktickQuoting: + @pytest.fixture + def df(self): + """ + Yields a dataframe with strings that may or may not need escaping + by backticks. The last two columns cannot be escaped by backticks + and should raise a ValueError. 
+ """ + yield DataFrame( + { + "A": [1, 2, 3], + "B B": [3, 2, 1], + "C C": [4, 5, 6], + "C C": [7, 4, 3], + "C_C": [8, 9, 10], + "D_D D": [11, 1, 101], + "E.E": [6, 3, 5], + "F-F": [8, 1, 10], + "1e1": [2, 4, 8], + "def": [10, 11, 2], + "A (x)": [4, 1, 3], + "B(x)": [1, 1, 5], + "B (x)": [2, 7, 4], + " &^ :!€$?(} > <++*'' ": [2, 5, 6], + "": [10, 11, 1], + " A": [4, 7, 9], + " ": [1, 2, 1], + "it's": [6, 3, 1], + "that's": [9, 1, 8], + "☺": [8, 7, 6], + "foo#bar": [2, 4, 5], + 1: [5, 7, 9], + } + ) + + def test_single_backtick_variable_query(self, df): + res = df.query("1 < `B B`") + expect = df[1 < df["B B"]] + tm.assert_frame_equal(res, expect) + + def test_two_backtick_variables_query(self, df): + res = df.query("1 < `B B` and 4 < `C C`") + expect = df[(1 < df["B B"]) & (4 < df["C C"])] + tm.assert_frame_equal(res, expect) + + def test_single_backtick_variable_expr(self, df): + res = df.eval("A + `B B`") + expect = df["A"] + df["B B"] + tm.assert_series_equal(res, expect) + + def test_two_backtick_variables_expr(self, df): + res = df.eval("`B B` + `C C`") + expect = df["B B"] + df["C C"] + tm.assert_series_equal(res, expect) + + def test_already_underscore_variable(self, df): + res = df.eval("`C_C` + A") + expect = df["C_C"] + df["A"] + tm.assert_series_equal(res, expect) + + def test_same_name_but_underscores(self, df): + res = df.eval("C_C + `C C`") + expect = df["C_C"] + df["C C"] + tm.assert_series_equal(res, expect) + + def test_mixed_underscores_and_spaces(self, df): + res = df.eval("A + `D_D D`") + expect = df["A"] + df["D_D D"] + tm.assert_series_equal(res, expect) + + def test_backtick_quote_name_with_no_spaces(self, df): + res = df.eval("A + `C_C`") + expect = df["A"] + df["C_C"] + tm.assert_series_equal(res, expect) + + def test_special_characters(self, df): + res = df.eval("`E.E` + `F-F` - A") + expect = df["E.E"] + df["F-F"] - df["A"] + tm.assert_series_equal(res, expect) + + def test_start_with_digit(self, df): + res = df.eval("A + `1e1`") + expect = df["A"] + df["1e1"] + tm.assert_series_equal(res, expect) + + def test_keyword(self, df): + res = df.eval("A + `def`") + expect = df["A"] + df["def"] + tm.assert_series_equal(res, expect) + + def test_unneeded_quoting(self, df): + res = df.query("`A` > 2") + expect = df[df["A"] > 2] + tm.assert_frame_equal(res, expect) + + def test_parenthesis(self, df): + res = df.query("`A (x)` > 2") + expect = df[df["A (x)"] > 2] + tm.assert_frame_equal(res, expect) + + def test_empty_string(self, df): + res = df.query("`` > 5") + expect = df[df[""] > 5] + tm.assert_frame_equal(res, expect) + + def test_multiple_spaces(self, df): + res = df.query("`C C` > 5") + expect = df[df["C C"] > 5] + tm.assert_frame_equal(res, expect) + + def test_start_with_spaces(self, df): + res = df.eval("` A` + ` `") + expect = df[" A"] + df[" "] + tm.assert_series_equal(res, expect) + + def test_lots_of_operators_string(self, df): + res = df.query("` &^ :!€$?(} > <++*'' ` > 4") + expect = df[df[" &^ :!€$?(} > <++*'' "] > 4] + tm.assert_frame_equal(res, expect) + + def test_missing_attribute(self, df): + message = "module 'pandas' has no attribute 'thing'" + with pytest.raises(AttributeError, match=message): + df.eval("@pd.thing") + + def test_failing_quote(self, df): + msg = r"(Could not convert ).*( to a valid Python identifier.)" + with pytest.raises(SyntaxError, match=msg): + df.query("`it's` > `that's`") + + def test_failing_character_outside_range(self, df): + msg = r"(Could not convert ).*( to a valid Python identifier.)" + with pytest.raises(SyntaxError, 
match=msg): + df.query("`☺` > 4") + + def test_failing_hashtag(self, df): + msg = "Failed to parse backticks" + with pytest.raises(SyntaxError, match=msg): + df.query("`foo#bar` > 4") + + def test_call_non_named_expression(self, df): + """ + Only attributes and variables ('named functions') can be called. + .__call__() is not an allowed attribute because that would allow + calling anything. + https://github.com/pandas-dev/pandas/pull/32460 + """ + + def func(*_): + return 1 + + funcs = [func] # noqa: F841 + + df.eval("@func()") + + with pytest.raises(TypeError, match="Only named functions are supported"): + df.eval("@funcs[0]()") + + with pytest.raises(TypeError, match="Only named functions are supported"): + df.eval("@funcs[0].__call__()") + + def test_ea_dtypes(self, any_numeric_ea_and_arrow_dtype): + # GH#29618 + df = DataFrame( + [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype + ) + warning = RuntimeWarning if NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.eval("c = b - a") + expected = DataFrame( + [[1, 2, 1], [3, 4, 1]], + columns=["a", "b", "c"], + dtype=any_numeric_ea_and_arrow_dtype, + ) + tm.assert_frame_equal(result, expected) + + def test_ea_dtypes_and_scalar(self): + # GH#29618 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"], dtype="Float64") + warning = RuntimeWarning if NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.eval("c = b - 1") + expected = DataFrame( + [[1, 2, 1], [3, 4, 3]], columns=["a", "b", "c"], dtype="Float64" + ) + tm.assert_frame_equal(result, expected) + + def test_ea_dtypes_and_scalar_operation(self, any_numeric_ea_and_arrow_dtype): + # GH#29618 + df = DataFrame( + [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype + ) + result = df.eval("c = 2 - 1") + expected = DataFrame( + { + "a": Series([1, 3], dtype=any_numeric_ea_and_arrow_dtype), + "b": Series([2, 4], dtype=any_numeric_ea_and_arrow_dtype), + "c": Series([1, 1], dtype=result["c"].dtype), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"]) + def test_query_ea_dtypes(self, dtype): + if dtype == "int64[pyarrow]": + pytest.importorskip("pyarrow") + # GH#50261 + df = DataFrame({"a": Series([1, 2], dtype=dtype)}) + ref = {2} # noqa: F841 + warning = RuntimeWarning if dtype == "Int64" and NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.query("a in @ref") + expected = DataFrame({"a": Series([2], dtype=dtype, index=[1])}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("engine", ["python", "numexpr"]) + @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"]) + def test_query_ea_equality_comparison(self, dtype, engine): + # GH#50261 + warning = RuntimeWarning if engine == "numexpr" else None + if engine == "numexpr" and not NUMEXPR_INSTALLED: + pytest.skip("numexpr not installed") + if dtype == "int64[pyarrow]": + pytest.importorskip("pyarrow") + df = DataFrame( + {"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)} + ) + with tm.assert_produces_warning(warning): + result = df.query("A == B", engine=engine) + expected = DataFrame( + { + "A": Series([1, 2], dtype="Int64", index=[0, 2]), + "B": Series([1, 2], dtype=dtype, index=[0, 2]), + } + ) + tm.assert_frame_equal(result, expected) + + def test_all_nat_in_object(self): + # GH#57068 + now = pd.Timestamp.now("UTC") # noqa: F841 + df = DataFrame({"a": 
pd.to_datetime([None, None], utc=True)}, dtype=object) + result = df.query("a > @now") + expected = DataFrame({"a": []}, dtype=object) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_reductions.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..66145c32c18d772f4316b8b32ddb9965a22b2abc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_reductions.py @@ -0,0 +1,2157 @@ +from datetime import timedelta +from decimal import Decimal +import re + +from dateutil.tz import tzlocal +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat.numpy import np_version_gt2 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, + DataFrame, + DatetimeIndex, + Index, + PeriodIndex, + RangeIndex, + Series, + Timestamp, + date_range, + isna, + notna, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +from pandas.core import ( + algorithms, + nanops, +) + +is_windows_np2_or_is32 = (is_platform_windows() and not np_version_gt2) or not IS64 +is_windows_or_is32 = is_platform_windows() or not IS64 + + +def make_skipna_wrapper(alternative, skipna_alternative=None): + """ + Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + function + """ + if skipna_alternative: + + def skipna_wrapper(x): + return skipna_alternative(x.values) + + else: + + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper + + +def assert_stat_op_calc( + opname, + alternative, + frame, + has_skipna=True, + check_dtype=True, + check_dates=False, + rtol=1e-5, + atol=1e-8, + skipna_alternative=None, +): + """ + Check that operator opname works as advertised on frame + + Parameters + ---------- + opname : str + Name of the operator to test on frame + alternative : function + Function that opname is tested against; i.e. "frame.opname()" should + equal "alternative(frame)". + frame : DataFrame + The object that the tests are executed on + has_skipna : bool, default True + Whether the method "opname" has the kwarg "skipna" + check_dtype : bool, default True + Whether the dtypes of the result of "frame.opname()" and + "alternative(frame)" should be checked. + check_dates : bool, default False + Whether opname should be tested on a Datetime Series + rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 
+ skipna_alternative : function, default None + NaN-safe version of alternative + """ + f = getattr(frame, opname) + + if check_dates: + df = DataFrame({"b": date_range("1/1/2001", periods=2)}) + with tm.assert_produces_warning(None): + result = getattr(df, opname)() + assert isinstance(result, Series) + + df["a"] = range(len(df)) + with tm.assert_produces_warning(None): + result = getattr(df, opname)() + assert isinstance(result, Series) + assert len(result) + + if has_skipna: + + def wrapper(x): + return alternative(x.values) + + skipna_wrapper = make_skipna_wrapper(alternative, skipna_alternative) + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + tm.assert_series_equal( + result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol + ) + tm.assert_series_equal( + result1, + frame.apply(wrapper, axis=1), + rtol=rtol, + atol=atol, + ) + else: + skipna_wrapper = alternative + + result0 = f(axis=0) + result1 = f(axis=1) + tm.assert_series_equal( + result0, + frame.apply(skipna_wrapper), + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + ) + + if opname in ["sum", "prod"]: + expected = frame.apply(skipna_wrapper, axis=1) + tm.assert_series_equal( + result1, expected, check_dtype=False, rtol=rtol, atol=atol + ) + + # check dtypes + if check_dtype: + lcd_dtype = frame.values.dtype + assert lcd_dtype == result0.dtype + assert lcd_dtype == result1.dtype + + # bad axis + with pytest.raises(ValueError, match="No axis named 2"): + f(axis=2) + + # all NA case + if has_skipna: + all_na = frame * np.nan + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname in ["sum", "prod"]: + unit = 1 if opname == "prod" else 0 # result for empty sum/prod + expected = Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) + + +@pytest.fixture +def bool_frame_with_na(): + """ + Fixture for DataFrame of booleans with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + """ + df = DataFrame( + np.concatenate( + [np.ones((15, 4), dtype=bool), np.zeros((15, 4), dtype=bool)], axis=0 + ), + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + columns=Index(list("ABCD"), dtype=object), + dtype=object, + ) + # set some NAs + df.iloc[5:10] = np.nan + df.iloc[15:20, -2:] = np.nan + return df + + +@pytest.fixture +def float_frame_with_na(): + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + """ + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + columns=Index(list("ABCD"), dtype=object), + ) + # set some NAs + df.iloc[5:10] = np.nan + df.iloc[15:20, -2:] = np.nan + return df + + +class TestDataFrameAnalytics: + # --------------------------------------------------------------------- + # Reductions + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize( + "opname", + [ + "count", + "sum", + "mean", + "product", + "median", + "min", + "max", + "nunique", + "var", + "std", + "sem", + pytest.param("skew", marks=td.skip_if_no("scipy")), + pytest.param("kurt", marks=td.skip_if_no("scipy")), + ], + ) + def test_stat_op_api_float_string_frame( + self, float_string_frame, axis, opname, using_infer_string + ): + if ( + (opname in ("sum", "min", "max") and axis == 0) + or opname + in ( + "count", + "nunique", + ) + 
) and not (using_infer_string and opname == "sum"): + getattr(float_string_frame, opname)(axis=axis) + else: + if opname in ["var", "std", "sem", "skew", "kurt"]: + msg = "could not convert string to float: 'bar'" + elif opname == "product": + if axis == 1: + msg = "can't multiply sequence by non-int of type 'float'" + else: + msg = "can't multiply sequence by non-int of type 'str'" + elif opname == "sum": + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname == "mean": + if axis == 0: + # different message on different builds + msg = "|".join( + [ + r"Could not convert \['.*'\] to numeric", + "Could not convert string '(bar){30}' to numeric", + ] + ) + else: + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname in ["min", "max"]: + msg = "'[><]=' not supported between instances of 'float' and 'str'" + elif opname == "median": + msg = re.compile( + r"Cannot convert \[.*\] to numeric|does not support", flags=re.S + ) + if not isinstance(msg, re.Pattern): + msg = msg + "|does not support" + with pytest.raises(TypeError, match=msg): + getattr(float_string_frame, opname)(axis=axis) + if opname != "nunique": + getattr(float_string_frame, opname)(axis=axis, numeric_only=True) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize( + "opname", + [ + "count", + "sum", + "mean", + "product", + "median", + "min", + "max", + "var", + "std", + "sem", + pytest.param("skew", marks=td.skip_if_no("scipy")), + pytest.param("kurt", marks=td.skip_if_no("scipy")), + ], + ) + def test_stat_op_api_float_frame(self, float_frame, axis, opname): + getattr(float_frame, opname)(axis=axis, numeric_only=False) + + def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame): + def count(s): + return notna(s).sum() + + def nunique(s): + return len(algorithms.unique1d(s.dropna())) + + def var(x): + return np.var(x, ddof=1) + + def std(x): + return np.std(x, ddof=1) + + def sem(x): + return np.std(x, ddof=1) / np.sqrt(len(x)) + + assert_stat_op_calc( + "nunique", + nunique, + float_frame_with_na, + has_skipna=False, + check_dtype=False, + check_dates=True, + ) + + # GH#32571: rtol needed for flaky CI builds + # mixed types (with upcasting happening) + assert_stat_op_calc( + "sum", + np.sum, + mixed_float_frame.astype("float32"), + check_dtype=False, + rtol=1e-3, + ) + + assert_stat_op_calc( + "sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum + ) + assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True) + assert_stat_op_calc( + "product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod + ) + + assert_stat_op_calc("var", var, float_frame_with_na) + assert_stat_op_calc("std", std, float_frame_with_na) + assert_stat_op_calc("sem", sem, float_frame_with_na) + + assert_stat_op_calc( + "count", + count, + float_frame_with_na, + has_skipna=False, + check_dtype=False, + check_dates=True, + ) + + def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na): + sp_stats = pytest.importorskip("scipy.stats") + + def skewness(x): + if len(x) < 3: + return np.nan + return sp_stats.skew(x, bias=False) + + def kurt(x): + if len(x) < 4: + return np.nan + return sp_stats.kurtosis(x, bias=False) + + assert_stat_op_calc("skew", skewness, float_frame_with_na) + assert_stat_op_calc("kurt", kurt, float_frame_with_na) + + def test_median(self, float_frame_with_na, int_frame): + def wrapper(x): + if isna(x).any(): + return np.nan + return np.median(x) + + assert_stat_op_calc("median", wrapper, float_frame_with_na, 
check_dates=True) + assert_stat_op_calc( + "median", wrapper, int_frame, check_dtype=False, check_dates=True + ) + + @pytest.mark.parametrize( + "method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"] + ) + @pytest.mark.parametrize( + "df", + [ + DataFrame( + { + "a": [ + -0.00049987540199591344, + -0.0016467257772919831, + 0.00067695870775883013, + ], + "b": [-0, -0, 0.0], + "c": [ + 0.00031111847529610595, + 0.0014902627951905339, + -0.00094099200035979691, + ], + }, + index=["foo", "bar", "baz"], + dtype="O", + ), + DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object), + ], + ) + @pytest.mark.filterwarnings("ignore:Mismatched null-like values:FutureWarning") + def test_stat_operators_attempt_obj_array(self, method, df, axis): + # GH#676 + assert df.values.dtype == np.object_ + result = getattr(df, method)(axis=axis) + expected = getattr(df.astype("f8"), method)(axis=axis).astype(object) + if axis in [1, "columns"] and method in ["min", "max"]: + expected[expected.isna()] = None + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"]) + def test_mixed_ops(self, op): + # GH#16116 + df = DataFrame( + { + "int": [1, 2, 3, 4], + "float": [1.0, 2.0, 3.0, 4.0], + "str": ["a", "b", "c", "d"], + } + ) + msg = "|".join( + [ + "Could not convert", + "could not convert", + "can't multiply sequence by non-int", + "does not support", + ] + ) + with pytest.raises(TypeError, match=msg): + getattr(df, op)() + + with pd.option_context("use_bottleneck", False): + msg = "|".join( + [ + "Could not convert", + "could not convert", + "can't multiply sequence by non-int", + "does not support", + ] + ) + with pytest.raises(TypeError, match=msg): + getattr(df, op)() + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="sum doesn't work for arrow strings" + ) + def test_reduce_mixed_frame(self): + # GH 6806 + df = DataFrame( + { + "bool_data": [True, True, False, False, False], + "int_data": [10, 20, 30, 40, 50], + "string_data": ["a", "b", "c", "d", "e"], + } + ) + df.reindex(columns=["bool_data", "int_data", "string_data"]) + test = df.sum(axis=0) + tm.assert_numpy_array_equal( + test.values, np.array([2, 150, "abcde"], dtype=object) + ) + alt = df.T.sum(axis=1) + tm.assert_series_equal(test, alt) + + def test_nunique(self): + df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]}) + tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2})) + tm.assert_series_equal( + df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3}) + ) + tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2})) + tm.assert_series_equal( + df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2}) + ) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_mean_mixed_datetime_numeric(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2}) + result = df.mean() + expected = Series([1.0, Timestamp("2000", tz=tz)], index=["A", "B"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_mean_includes_datetimes(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + # Behavior in 0.24.0rc1 was buggy. 
+ # As of 2.0 with numeric_only=None we do *not* drop datetime columns + df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2}) + result = df.mean() + + expected = Series([Timestamp("2000", tz=tz)], index=["A"]) + tm.assert_series_equal(result, expected) + + def test_mean_mixed_string_decimal(self): + # GH 11670 + # possible bug when calculating mean of DataFrame? + + d = [ + {"A": 2, "B": None, "C": Decimal("628.00")}, + {"A": 1, "B": None, "C": Decimal("383.00")}, + {"A": 3, "B": None, "C": Decimal("651.00")}, + {"A": 2, "B": None, "C": Decimal("575.00")}, + {"A": 4, "B": None, "C": Decimal("1114.00")}, + {"A": 1, "B": "TEST", "C": Decimal("241.00")}, + {"A": 2, "B": None, "C": Decimal("572.00")}, + {"A": 4, "B": None, "C": Decimal("609.00")}, + {"A": 3, "B": None, "C": Decimal("820.00")}, + {"A": 5, "B": None, "C": Decimal("1223.00")}, + ] + + df = DataFrame(d) + + with pytest.raises( + TypeError, match="unsupported operand type|does not support" + ): + df.mean() + result = df[["A", "C"]].mean() + expected = Series([2.7, 681.6], index=["A", "C"], dtype=object) + tm.assert_series_equal(result, expected) + + def test_var_std(self, datetime_frame): + result = datetime_frame.std(ddof=4) + expected = datetime_frame.apply(lambda x: x.std(ddof=4)) + tm.assert_almost_equal(result, expected) + + result = datetime_frame.var(ddof=4) + expected = datetime_frame.apply(lambda x: x.var(ddof=4)) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0) + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context("use_bottleneck", False): + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + @pytest.mark.parametrize("meth", ["sem", "var", "std"]) + def test_numeric_only_flag(self, meth): + # GH 9201 + df1 = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["foo", "bar", "baz"], + ) + # Cast to object to avoid implicit cast when setting entry to "100" below + df1 = df1.astype({"foo": object}) + # set one entry to a number in str format + df1.loc[0, "foo"] = "100" + + df2 = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["foo", "bar", "baz"], + ) + # Cast to object to avoid implicit cast when setting entry to "a" below + df2 = df2.astype({"foo": object}) + # set one entry to a non-number str + df2.loc[0, "foo"] = "a" + + result = getattr(df1, meth)(axis=1, numeric_only=True) + expected = getattr(df1[["bar", "baz"]], meth)(axis=1) + tm.assert_series_equal(expected, result) + + result = getattr(df2, meth)(axis=1, numeric_only=True) + expected = getattr(df2[["bar", "baz"]], meth)(axis=1) + tm.assert_series_equal(expected, result) + + # df1 has all numbers, df2 has a letter inside + msg = r"unsupported operand type\(s\) for -: 'float' and 'str'" + with pytest.raises(TypeError, match=msg): + getattr(df1, meth)(axis=1, numeric_only=False) + msg = "could not convert string to float: 'a'" + with pytest.raises(TypeError, match=msg): + getattr(df2, meth)(axis=1, numeric_only=False) + + def test_sem(self, datetime_frame): + result = datetime_frame.sem(ddof=4) + expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x))) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0) + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context("use_bottleneck", False): + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + 
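+ # A minimal illustrative sketch (added for clarity; not part of the original suite) of the identity test_sem above relies on, using a hypothetical stand-in series: the standard error of the mean is the sample standard deviation divided by sqrt(n). + # + # s = Series([1.0, 2.0, 4.0, 8.0]) + # assert np.isclose(s.sem(), s.std(ddof=1) / np.sqrt(len(s))) + 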
@pytest.mark.parametrize( + "dropna, expected", + [ + ( + True, + { + "A": [12], + "B": [10.0], + "C": [1.0], + "D": ["a"], + "E": Categorical(["a"], categories=["a"]), + "F": DatetimeIndex(["2000-01-02"], dtype="M8[ns]"), + "G": to_timedelta(["1 days"]), + }, + ), + ( + False, + { + "A": [12], + "B": [10.0], + "C": [np.nan], + "D": np.array([np.nan], dtype=object), + "E": Categorical([np.nan], categories=["a"]), + "F": DatetimeIndex([pd.NaT], dtype="M8[ns]"), + "G": to_timedelta([pd.NaT]), + }, + ), + ( + True, + { + "H": [8, 9, np.nan, np.nan], + "I": [8, 9, np.nan, np.nan], + "J": [1, np.nan, np.nan, np.nan], + "K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]), + "L": DatetimeIndex( + ["2000-01-02", "NaT", "NaT", "NaT"], dtype="M8[ns]" + ), + "M": to_timedelta(["1 days", "nan", "nan", "nan"]), + "N": [0, 1, 2, 3], + }, + ), + ( + False, + { + "H": [8, 9, np.nan, np.nan], + "I": [8, 9, np.nan, np.nan], + "J": [1, np.nan, np.nan, np.nan], + "K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]), + "L": DatetimeIndex( + ["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]" + ), + "M": to_timedelta(["nan", "1 days", "nan", "nan"]), + "N": [0, 1, 2, 3], + }, + ), + ], + ) + def test_mode_dropna(self, dropna, expected): + df = DataFrame( + { + "A": [12, 12, 19, 11], + "B": [10, 10, np.nan, 3], + "C": [1, np.nan, np.nan, np.nan], + "D": Series([np.nan, np.nan, "a", np.nan], dtype=object), + "E": Categorical([np.nan, np.nan, "a", np.nan]), + "F": DatetimeIndex(["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"), + "G": to_timedelta(["1 days", "nan", "nan", "nan"]), + "H": [8, 8, 9, 9], + "I": [9, 9, 8, 8], + "J": [1, 1, np.nan, np.nan], + "K": Categorical(["a", np.nan, "a", np.nan]), + "L": DatetimeIndex( + ["2000-01-02", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]" + ), + "M": to_timedelta(["1 days", "nan", "1 days", "nan"]), + "N": np.arange(4, dtype="int64"), + } + ) + + result = df[sorted(expected.keys())].mode(dropna=dropna) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + def test_mode_sortwarning(self, using_infer_string): + # Check for the warning that is raised when the mode + # results cannot be sorted + + df = DataFrame({"A": [np.nan, np.nan, "a", "a"]}) + expected = DataFrame({"A": ["a", np.nan]}) + + warning = None if using_infer_string else UserWarning + with tm.assert_produces_warning(warning): + result = df.mode(dropna=False) + result = result.sort_values(by="A").reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + def test_mode_empty_df(self): + df = DataFrame([], columns=["a", "b"]) + result = df.mode() + expected = DataFrame([], columns=["a", "b"], index=Index([], dtype=np.int64)) + tm.assert_frame_equal(result, expected) + + def test_operators_timedelta64(self): + df = DataFrame( + { + "A": date_range("2012-1-1", periods=3, freq="D"), + "B": date_range("2012-1-2", periods=3, freq="D"), + "C": Timestamp("20120101") - timedelta(minutes=5, seconds=5), + } + ) + + diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]}) + + # min + result = diffs.min() + assert result.iloc[0] == diffs.loc[0, "A"] + assert result.iloc[1] == diffs.loc[0, "B"] + + result = diffs.min(axis=1) + assert (result == diffs.loc[0, "B"]).all() + + # max + result = diffs.max() + assert result.iloc[0] == diffs.loc[2, "A"] + assert result.iloc[1] == diffs.loc[2, "B"] + + result = diffs.max(axis=1) + assert (result == diffs["A"]).all() + + # abs + result = diffs.abs() + result2 = abs(diffs) + expected = DataFrame({"A": df["A"] - 
df["C"], "B": df["B"] - df["A"]}) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + # mixed frame + mixed = diffs.copy() + mixed["C"] = "foo" + mixed["D"] = 1 + mixed["E"] = 1.0 + mixed["F"] = Timestamp("20130101") + + # results in an object array + result = mixed.min() + expected = Series( + [ + pd.Timedelta(timedelta(seconds=5 * 60 + 5)), + pd.Timedelta(timedelta(days=-1)), + "foo", + 1, + 1.0, + Timestamp("20130101"), + ], + index=mixed.columns, + ) + tm.assert_series_equal(result, expected) + + # excludes non-numeric + result = mixed.min(axis=1, numeric_only=True) + expected = Series([1, 1, 1.0], index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + # works when only those columns are selected + result = mixed[["A", "B"]].min(1) + expected = Series([timedelta(days=-1)] * 3) + tm.assert_series_equal(result, expected) + + result = mixed[["A", "B"]].min() + expected = Series( + [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"] + ) + tm.assert_series_equal(result, expected) + + # GH 3106 + df = DataFrame( + { + "time": date_range("20130102", periods=5), + "time2": date_range("20130105", periods=5), + } + ) + df["off1"] = df["time2"] - df["time"] + assert df["off1"].dtype == "timedelta64[ns]" + + df["off2"] = df["time"] - df["time2"] + df._consolidate_inplace() + assert df["off1"].dtype == "timedelta64[ns]" + assert df["off2"].dtype == "timedelta64[ns]" + + def test_std_timedelta64_skipna_false(self): + # GH#37392 + tdi = pd.timedelta_range("1 Day", periods=10) + df = DataFrame({"A": tdi, "B": tdi}, copy=True) + df.iloc[-2, -1] = pd.NaT + + result = df.std(skipna=False) + expected = Series( + [df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]" + ) + tm.assert_series_equal(result, expected) + + result = df.std(axis=1, skipna=False) + expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", [["2022-01-01", "2022-01-02", pd.NaT, "2022-01-03"], 4 * [pd.NaT]] + ) + def test_std_datetime64_with_nat( + self, values, skipna, using_array_manager, request, unit + ): + # GH#51335 + if using_array_manager and ( + not skipna or all(value is pd.NaT for value in values) + ): + mark = pytest.mark.xfail( + reason="GH#51446: Incorrect type inference on NaT in reduction result" + ) + request.applymarker(mark) + dti = to_datetime(values).as_unit(unit) + df = DataFrame({"a": dti}) + result = df.std(skipna=skipna) + if not skipna or all(value is pd.NaT for value in values): + expected = Series({"a": pd.NaT}, dtype=f"timedelta64[{unit}]") + else: + # 86400000000000ns == 1 day + expected = Series({"a": 86400000000000}, dtype=f"timedelta64[{unit}]") + tm.assert_series_equal(result, expected) + + def test_sum_corner(self): + empty_frame = DataFrame() + + axis0 = empty_frame.sum(0) + axis1 = empty_frame.sum(1) + assert isinstance(axis0, Series) + assert isinstance(axis1, Series) + assert len(axis0) == 0 + assert len(axis1) == 0 + + @pytest.mark.parametrize( + "index", + [ + RangeIndex(0), + DatetimeIndex([]), + Index([], dtype=np.int64), + Index([], dtype=np.float64), + DatetimeIndex([], freq="ME"), + PeriodIndex([], freq="D"), + ], + ) + def test_axis_1_empty(self, all_reductions, index): + df = DataFrame(columns=["a"], index=index) + result = getattr(df, all_reductions)(axis=1) + if all_reductions in ("any", "all"): + expected_dtype = "bool" + elif all_reductions == "count": + expected_dtype = "int64" + else: + expected_dtype = "object" 
+ expected = Series([], index=index, dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): + idx = ["a", "b", "c"] + df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series([unit, unit, unit], index=idx, dtype="float64") + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(df, method)(numeric_only=numeric_only, min_count=1) + expected = Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) + expected = Series([unit, unit, unit], index=idx, dtype="float64") + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) + expected = Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count > 1 + df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) + expected = Series(result, index=["A", "B"]) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) + expected = Series(result, index=["A", "B"]) + tm.assert_series_equal(result, expected) + + def test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ["a", "b", "c"] + df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]}) + + df2 = df.apply(to_timedelta) + + # 0 by default + result = df2.sum() + expected = Series([0, 0, 0], dtype="m8[ns]", index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx) + tm.assert_series_equal(result, expected) + + def test_sum_nanops_min_count(self): + # https://github.com/pandas-dev/pandas/issues/39738 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + result = df.sum(min_count=10) + expected = Series([np.nan, np.nan], index=["x", "y"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"]) + @pytest.mark.parametrize( + "kwargs, expected_result", + [ + ({"axis": 1, "min_count": 2}, [3.2, 5.3, np.nan]), + ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]), + ({"axis": 1, "skipna": False}, [3.2, 5.3, np.nan]), + ], + ) + def test_sum_nanops_dtype_min_count(self, float_type, kwargs, expected_result): + # GH#46947 + df = DataFrame({"a": [1.0, 2.3, 4.4], "b": [2.2, 3, np.nan]}, dtype=float_type) + result = df.sum(**kwargs) + expected = Series(expected_result).astype(float_type) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"]) + @pytest.mark.parametrize( + "kwargs, expected_result", + [ + ({"axis": 1, "min_count": 2}, [2.0, 4.0, np.nan]), + ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]), + ({"axis": 1, "skipna": False}, [2.0, 4.0, np.nan]), + ], + ) + def test_prod_nanops_dtype_min_count(self, float_type, kwargs, expected_result): + # GH#46947 + df = DataFrame( + {"a": [1.0, 2.0, 4.4], "b": [2.0, 2.0, np.nan]}, 
dtype=float_type + ) + result = df.prod(**kwargs) + expected = Series(expected_result).astype(float_type) + tm.assert_series_equal(result, expected) + + def test_sum_object(self, float_frame): + values = float_frame.values.astype(int) + frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns) + deltas = frame * timedelta(1) + deltas.sum() + + def test_sum_bool(self, float_frame): + # ensure this works, bug report + bools = np.isnan(float_frame) + bools.sum(1) + bools.sum(0) + + def test_sum_mixed_datetime(self): + # GH#30886 + df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex( + [2, 3, 4] + ) + with pytest.raises(TypeError, match="does not support reduction 'sum'"): + df.sum() + + def test_mean_corner(self, float_frame, float_string_frame): + # unit test when have object data + msg = "Could not convert|does not support" + with pytest.raises(TypeError, match=msg): + float_string_frame.mean(axis=0) + + # xs sum mixed type, just want to know it works... + with pytest.raises(TypeError, match="unsupported operand type"): + float_string_frame.mean(axis=1) + + # take mean of boolean column + float_frame["bool"] = float_frame["A"] > 0 + means = float_frame.mean(0) + assert means["bool"] == float_frame["bool"].values.mean() + + def test_mean_datetimelike(self): + # GH#24757 check that datetimelike are excluded by default, handled + # correctly with numeric_only=True + # As of 2.0, datetimelike are *not* excluded with numeric_only=None + + df = DataFrame( + { + "A": np.arange(3), + "B": date_range("2016-01-01", periods=3), + "C": pd.timedelta_range("1D", periods=3), + "D": pd.period_range("2016", periods=3, freq="Y"), + } + ) + result = df.mean(numeric_only=True) + expected = Series({"A": 1.0}) + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError, match="mean is not implemented for PeriodArray"): + df.mean() + + def test_mean_datetimelike_numeric_only_false(self): + df = DataFrame( + { + "A": np.arange(3), + "B": date_range("2016-01-01", periods=3), + "C": pd.timedelta_range("1D", periods=3), + } + ) + + # datetime(tz) and timedelta work + result = df.mean(numeric_only=False) + expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]}) + tm.assert_series_equal(result, expected) + + # mean of period is not allowed + df["D"] = pd.period_range("2016", periods=3, freq="Y") + + with pytest.raises(TypeError, match="mean is not implemented for Period"): + df.mean(numeric_only=False) + + def test_mean_extensionarray_numeric_only_true(self): + # https://github.com/pandas-dev/pandas/issues/33256 + arr = np.random.default_rng(2).integers(1000, size=(10, 5)) + df = DataFrame(arr, dtype="Int64") + result = df.mean(numeric_only=True) + expected = DataFrame(arr).mean().astype("Float64") + tm.assert_series_equal(result, expected) + + def test_stats_mixed_type(self, float_string_frame): + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.std(1) + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.var(1) + with pytest.raises(TypeError, match="unsupported operand type"): + float_string_frame.mean(1) + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.skew(1) + + def test_sum_bools(self): + df = DataFrame(index=range(1), columns=range(10)) + bools = isna(df) + assert bools.sum(axis=1)[0] == 10 + + # ---------------------------------------------------------------------- + # Index of max / min + + @pytest.mark.parametrize("skipna", [True, False]) + 
@pytest.mark.parametrize("axis", [0, 1]) + def test_idxmin(self, float_frame, int_frame, skipna, axis): + frame = float_frame + frame.iloc[5:10] = np.nan + frame.iloc[15:20, -2:] = np.nan + for df in [frame, int_frame]: + warn = None + if skipna is False or axis == 1: + warn = None if df is int_frame else FutureWarning + msg = "The behavior of DataFrame.idxmin with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = df.idxmin(axis=axis, skipna=skipna) + + msg2 = "The behavior of Series.idxmin" + with tm.assert_produces_warning(warn, match=msg2): + expected = df.apply(Series.idxmin, axis=axis, skipna=skipna) + expected = expected.astype(df.index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_idxmin_empty(self, index, skipna, axis): + # GH53265 + if axis == 0: + frame = DataFrame(index=index) + else: + frame = DataFrame(columns=index) + + result = frame.idxmin(axis=axis, skipna=skipna) + expected = Series(dtype=index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_idxmin_numeric_only(self, numeric_only): + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")}) + result = df.idxmin(numeric_only=numeric_only) + if numeric_only: + expected = Series([2, 1], index=["a", "b"]) + else: + expected = Series([2, 1, 0], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + def test_idxmin_axis_2(self, float_frame): + frame = float_frame + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + frame.idxmin(axis=2) + + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_idxmax(self, float_frame, int_frame, skipna, axis): + frame = float_frame + frame.iloc[5:10] = np.nan + frame.iloc[15:20, -2:] = np.nan + for df in [frame, int_frame]: + warn = None + if skipna is False or axis == 1: + warn = None if df is int_frame else FutureWarning + msg = "The behavior of DataFrame.idxmax with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = df.idxmax(axis=axis, skipna=skipna) + + msg2 = "The behavior of Series.idxmax" + with tm.assert_produces_warning(warn, match=msg2): + expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) + expected = expected.astype(df.index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_idxmax_empty(self, index, skipna, axis): + # GH53265 + if axis == 0: + frame = DataFrame(index=index) + else: + frame = DataFrame(columns=index) + + result = frame.idxmax(axis=axis, skipna=skipna) + expected = Series(dtype=index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_idxmax_numeric_only(self, numeric_only): + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")}) + result = df.idxmax(numeric_only=numeric_only) + if numeric_only: + expected = Series([1, 0], index=["a", "b"]) + else: + expected = Series([1, 0, 1], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + def test_idxmax_arrow_types(self): + # GH#55368 + pytest.importorskip("pyarrow") + + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]") + result = df.idxmax() + expected = Series([1, 
0], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([2, 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + df = DataFrame({"a": ["b", "c", "a"]}, dtype="string[pyarrow]") + result = df.idxmax(numeric_only=False) + expected = Series([1], index=["a"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin(numeric_only=False) + expected = Series([2], index=["a"]) + tm.assert_series_equal(result, expected) + + def test_idxmax_axis_2(self, float_frame): + frame = float_frame + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + frame.idxmax(axis=2) + + def test_idxmax_mixed_dtype(self): + # don't cast to object, which would raise in nanops + dti = date_range("2016-01-01", periods=3) + + # Copying dti is needed for ArrayManager otherwise when we set + # df.loc[0, 3] = pd.NaT below it edits dti + df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti.copy(deep=True)}) + + result = df.idxmax() + expected = Series([1, 0, 2], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 0], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + # with NaTs + df.loc[0, 3] = pd.NaT + result = df.idxmax() + expected = Series([1, 0, 2], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + # with multi-column dt64 block + df[4] = dti[::-1] + df._consolidate_inplace() + + result = df.idxmax() + expected = Series([1, 0, 2, 0], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1, 2], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "op, expected_value", + [("idxmax", [0, 4]), ("idxmin", [0, 5])], + ) + def test_idxmax_idxmin_convert_dtypes(self, op, expected_value): + # GH 40346 + df = DataFrame( + { + "ID": [100, 100, 100, 200, 200, 200], + "value": [0, 0, 0, 1, 2, 0], + }, + dtype="Int64", + ) + df = df.groupby("ID") + + result = getattr(df, op)() + expected = DataFrame( + {"value": expected_value}, + index=Index([100, 200], name="ID", dtype="Int64"), + ) + tm.assert_frame_equal(result, expected) + + def test_idxmax_dt64_multicolumn_axis1(self): + dti = date_range("2016-01-01", periods=3) + df = DataFrame({3: dti, 4: dti[::-1]}, copy=True) + df.iloc[0, 0] = pd.NaT + + df._consolidate_inplace() + + result = df.idxmax(axis=1) + expected = Series([4, 3, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin(axis=1) + expected = Series([4, 3, 4]) + tm.assert_series_equal(result, expected) + + # ---------------------------------------------------------------------- + # Logical reductions + + @pytest.mark.parametrize("opname", ["any", "all"]) + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("bool_only", [False, True]) + def test_any_all_mixed_float(self, opname, axis, bool_only, float_string_frame): + # make sure op works on mixed-type frame + mixed = float_string_frame + mixed["_bool_"] = np.random.default_rng(2).standard_normal(len(mixed)) > 0.5 + + getattr(mixed, opname)(axis=axis, bool_only=bool_only) + + @pytest.mark.parametrize("opname", ["any", "all"]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_any_all_bool_with_na(self, opname, axis, bool_frame_with_na): + getattr(bool_frame_with_na, opname)(axis=axis, bool_only=False) + + 
@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") + @pytest.mark.parametrize("opname", ["any", "all"]) + def test_any_all_bool_frame(self, opname, bool_frame_with_na): + # GH#12863: numpy gives back non-boolean data for object type + # so fill NaNs to compare with pandas behavior + frame = bool_frame_with_na.fillna(True) + alternative = getattr(np, opname) + f = getattr(frame, opname) + + def skipna_wrapper(x): + nona = x.dropna().values + return alternative(nona) + + def wrapper(x): + return alternative(x.values) + + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + + tm.assert_series_equal(result0, frame.apply(wrapper)) + tm.assert_series_equal(result1, frame.apply(wrapper, axis=1)) + + result0 = f(axis=0) + result1 = f(axis=1) + + tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) + tm.assert_series_equal( + result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False + ) + + # bad axis + with pytest.raises(ValueError, match="No axis named 2"): + f(axis=2) + + # all NA case + all_na = frame * np.nan + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname == "any": + assert not r0.any() + assert not r1.any() + else: + assert r0.all() + assert r1.all() + + def test_any_all_extra(self): + df = DataFrame( + { + "A": [True, False, False], + "B": [True, True, False], + "C": [True, True, True], + }, + index=["a", "b", "c"], + ) + result = df[["A", "B"]].any(axis=1) + expected = Series([True, True, False], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + result = df[["A", "B"]].any(axis=1, bool_only=True) + tm.assert_series_equal(result, expected) + + result = df.all(1) + expected = Series([True, False, False], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + result = df.all(1, bool_only=True) + tm.assert_series_equal(result, expected) + + # Axis is None + result = df.all(axis=None).item() + assert result is False + + result = df.any(axis=None).item() + assert result is True + + result = df[["C"]].all(axis=None).item() + assert result is True + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_any_all_object_dtype( + self, axis, bool_agg_func, skipna, using_infer_string + ): + # GH#35450 + df = DataFrame( + data=[ + [1, np.nan, np.nan, True], + [np.nan, 2, np.nan, True], + [np.nan, np.nan, np.nan, True], + [np.nan, np.nan, "5", np.nan], + ] + ) + if using_infer_string: + # na in object is True while in string pyarrow numpy it's false + val = not axis == 0 and not skipna and bool_agg_func == "all" + else: + val = True + result = getattr(df, bool_agg_func)(axis=axis, skipna=skipna) + expected = Series([True, True, val, True]) + tm.assert_series_equal(result, expected) + + # GH#50947 deprecates this but it is not emitting a warning in some builds. 
+ @pytest.mark.filterwarnings( + "ignore:'any' with datetime64 dtypes is deprecated.*:FutureWarning" + ) + def test_any_datetime(self): + # GH 23070 + float_data = [1, np.nan, 3, np.nan] + datetime_data = [ + Timestamp("1960-02-15"), + Timestamp("1960-02-16"), + pd.NaT, + pd.NaT, + ] + df = DataFrame({"A": float_data, "B": datetime_data}) + + result = df.any(axis=1) + + expected = Series([True, True, True, False]) + tm.assert_series_equal(result, expected) + + def test_any_all_bool_only(self): + # GH 25101 + df = DataFrame( + {"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]}, + columns=Index(["col1", "col2", "col3"], dtype=object), + ) + + result = df.all(bool_only=True) + expected = Series(dtype=np.bool_, index=[]) + tm.assert_series_equal(result, expected) + + df = DataFrame( + { + "col1": [1, 2, 3], + "col2": [4, 5, 6], + "col3": [None, None, None], + "col4": [False, False, True], + } + ) + + result = df.all(bool_only=True) + expected = Series({"col4": False}) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "func, data, expected", + [ + (np.any, {}, False), + (np.all, {}, True), + (np.any, {"A": []}, False), + (np.all, {"A": []}, True), + (np.any, {"A": [False, False]}, False), + (np.all, {"A": [False, False]}, False), + (np.any, {"A": [True, False]}, True), + (np.all, {"A": [True, False]}, False), + (np.any, {"A": [True, True]}, True), + (np.all, {"A": [True, True]}, True), + (np.any, {"A": [False], "B": [False]}, False), + (np.all, {"A": [False], "B": [False]}, False), + (np.any, {"A": [False, False], "B": [False, True]}, True), + (np.all, {"A": [False, False], "B": [False, True]}, False), + # other types + (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False), + (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True), + (np.all, {"A": Series([0, 1], dtype=int)}, False), + (np.any, {"A": Series([0, 1], dtype=int)}, True), + pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False), + pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False), + pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True), + pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True), + pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False), + pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True), + # np.all on Categorical raises, so the reduction drops the + # column, so all is being done on an empty Series, so is True + (np.all, {"A": Series([0, 1], dtype="category")}, True), + (np.any, {"A": Series([0, 1], dtype="category")}, False), + (np.all, {"A": Series([1, 2], dtype="category")}, True), + (np.any, {"A": Series([1, 2], dtype="category")}, False), + # Mix GH#21484 + pytest.param( + np.all, + { + "A": Series([10, 20], dtype="M8[ns]"), + "B": Series([10, 20], dtype="m8[ns]"), + }, + True, + ), + ], + ) + def test_any_all_np_func(self, func, data, expected): + # GH 19976 + data = DataFrame(data) + + if any(isinstance(x, CategoricalDtype) for x in data.dtypes): + with pytest.raises( + TypeError, match="dtype category does not support reduction" + ): + func(data) + + # method 
version + with pytest.raises( + TypeError, match="dtype category does not support reduction" + ): + getattr(DataFrame(data), func.__name__)(axis=None) + else: + msg = "'(any|all)' with datetime64 dtypes is deprecated" + if data.dtypes.apply(lambda x: x.kind == "M").any(): + warn = FutureWarning + else: + warn = None + + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + # GH#34479 + result = func(data) + assert isinstance(result, np.bool_) + assert result.item() is expected + + # method version + with tm.assert_produces_warning(warn, match=msg): + # GH#34479 + result = getattr(DataFrame(data), func.__name__)(axis=None) + assert isinstance(result, np.bool_) + assert result.item() is expected + + def test_any_all_object(self): + # GH 19976 + result = np.all(DataFrame(columns=["a", "b"])).item() + assert result is True + + result = np.any(DataFrame(columns=["a", "b"])).item() + assert result is False + + def test_any_all_object_bool_only(self): + df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object) + df._consolidate_inplace() + df["C"] = Series([True, True]) + + # Categorical of bools is _not_ considered booly + df["D"] = df["C"].astype("category") + + # The underlying bug is in DataFrame._get_bool_data, so we check + # that while we're here + res = df._get_bool_data() + expected = df[["C"]] + tm.assert_frame_equal(res, expected) + + res = df.all(bool_only=True, axis=0) + expected = Series([True], index=["C"]) + tm.assert_series_equal(res, expected) + + # operating on a subset of columns should not produce a _larger_ Series + res = df[["B", "C"]].all(bool_only=True, axis=0) + tm.assert_series_equal(res, expected) + + assert df.all(bool_only=True, axis=None) + + res = df.any(bool_only=True, axis=0) + expected = Series([True], index=["C"]) + tm.assert_series_equal(res, expected) + + # operating on a subset of columns should not produce a _larger_ Series + res = df[["C"]].any(bool_only=True, axis=0) + tm.assert_series_equal(res, expected) + + assert df.any(bool_only=True, axis=None) + + # --------------------------------------------------------------------- + # Unsorted + + def test_series_broadcasting(self): + # smoke test for numpy warnings + # GH 16378, GH 16306 + df = DataFrame([1.0, 1.0, 1.0]) + df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]}) + s = Series([1, 1, 1]) + s_nan = Series([np.nan, np.nan, 1]) + + with tm.assert_produces_warning(None): + df_nan.clip(lower=s, axis=0) + for op in ["lt", "le", "gt", "ge", "eq", "ne"]: + getattr(df, op)(s_nan, axis=0) + + +class TestDataFrameReductions: + def test_min_max_dt64_with_NaT(self): + # Both NaT and Timestamp are in DataFrame. + df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]}) + + res = df.min() + exp = Series([Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = Series([Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + # GH12941, only NaTs are in DataFrame. 
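+        # With no non-NaT values left to reduce over, min/max return NaT
+        # rather than raising.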
+ df = DataFrame({"foo": [pd.NaT, pd.NaT]}) + + res = df.min() + exp = Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) + + def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture): + # GH#36907 + tz = tz_naive_fixture + if isinstance(tz, tzlocal) and is_platform_windows(): + pytest.skip( + "GH#37659 OSError raised within tzlocal bc Windows " + "chokes in times before 1970-01-01" + ) + + df = DataFrame( + { + "a": [ + Timestamp("2020-01-01 08:00:00", tz=tz), + Timestamp("1920-02-01 09:00:00", tz=tz), + ], + "b": [Timestamp("2020-02-01 08:00:00", tz=tz), pd.NaT], + } + ) + res = df.min(axis=1, skipna=False) + expected = Series([df.loc[0, "a"], pd.NaT]) + assert expected.dtype == df["a"].dtype + + tm.assert_series_equal(res, expected) + + res = df.max(axis=1, skipna=False) + expected = Series([df.loc[0, "b"], pd.NaT]) + assert expected.dtype == df["a"].dtype + + tm.assert_series_equal(res, expected) + + def test_min_max_dt64_api_consistency_with_NaT(self): + # Calling the following sum functions returned an error for dataframes but + # returned NaT for series. These tests check that the API is consistent in + # min/max calls on empty Series/DataFrames. See GH:33704 for more + # information + df = DataFrame({"x": to_datetime([])}) + expected_dt_series = Series(to_datetime([])) + # check axis 0 + assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT) + assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT) + + # check axis 1 + tm.assert_series_equal(df.min(axis=1), expected_dt_series) + tm.assert_series_equal(df.max(axis=1), expected_dt_series) + + def test_min_max_dt64_api_consistency_empty_df(self): + # check DataFrame/Series api consistency when calling min/max on an empty + # DataFrame/Series. 
+ df = DataFrame({"x": []}) + expected_float_series = Series([], dtype=float) + # check axis 0 + assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min()) + assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max()) + # check axis 1 + tm.assert_series_equal(df.min(axis=1), expected_float_series) + tm.assert_series_equal(df.min(axis=1), expected_float_series) + + @pytest.mark.parametrize( + "initial", + ["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"], # Non-UTC timezone + ) + @pytest.mark.parametrize("method", ["min", "max"]) + def test_preserve_timezone(self, initial: str, method): + # GH 28552 + initial_dt = to_datetime(initial) + expected = Series([initial_dt]) + df = DataFrame([expected]) + result = getattr(df, method)(axis=1) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_minmax_tzaware_skipna_axis_1(self, method, skipna): + # GH#51242 + val = to_datetime("1900-01-01", utc=True) + df = DataFrame( + {"a": Series([pd.NaT, pd.NaT, val]), "b": Series([pd.NaT, val, val])} + ) + op = getattr(df, method) + result = op(axis=1, skipna=skipna) + if skipna: + expected = Series([pd.NaT, val, val]) + else: + expected = Series([pd.NaT, pd.NaT, val]) + tm.assert_series_equal(result, expected) + + def test_frame_any_with_timedelta(self): + # GH#17667 + df = DataFrame( + { + "a": Series([0, 0]), + "t": Series([to_timedelta(0, "s"), to_timedelta(1, "ms")]), + } + ) + + result = df.any(axis=0) + expected = Series(data=[False, True], index=["a", "t"]) + tm.assert_series_equal(result, expected) + + result = df.any(axis=1) + expected = Series(data=[False, True]) + tm.assert_series_equal(result, expected) + + def test_reductions_skipna_none_raises( + self, request, frame_or_series, all_reductions + ): + if all_reductions == "count": + request.applymarker( + pytest.mark.xfail(reason="Count does not accept skipna") + ) + obj = frame_or_series([1, 2, 3]) + msg = 'For argument "skipna" expected type bool, received type NoneType.' 
+ with pytest.raises(ValueError, match=msg): + getattr(obj, all_reductions)(skipna=None) + + @td.skip_array_manager_invalid_test + def test_reduction_timestamp_smallest_unit(self): + # GH#52524 + df = DataFrame( + { + "a": Series([Timestamp("2019-12-31")], dtype="datetime64[s]"), + "b": Series( + [Timestamp("2019-12-31 00:00:00.123")], dtype="datetime64[ms]" + ), + } + ) + result = df.max() + expected = Series( + [Timestamp("2019-12-31"), Timestamp("2019-12-31 00:00:00.123")], + dtype="datetime64[ms]", + index=["a", "b"], + ) + tm.assert_series_equal(result, expected) + + @td.skip_array_manager_not_yet_implemented + def test_reduction_timedelta_smallest_unit(self): + # GH#52524 + df = DataFrame( + { + "a": Series([pd.Timedelta("1 days")], dtype="timedelta64[s]"), + "b": Series([pd.Timedelta("1 days")], dtype="timedelta64[ms]"), + } + ) + result = df.max() + expected = Series( + [pd.Timedelta("1 days"), pd.Timedelta("1 days")], + dtype="timedelta64[ms]", + index=["a", "b"], + ) + tm.assert_series_equal(result, expected) + + +class TestNuisanceColumns: + @pytest.mark.parametrize("method", ["any", "all"]) + def test_any_all_categorical_dtype_nuisance_column(self, method): + # GH#36076 DataFrame should match Series behavior + ser = Series([0, 1], dtype="category", name="A") + df = ser.to_frame() + + # Double-check the Series behavior is to raise + with pytest.raises(TypeError, match="does not support reduction"): + getattr(ser, method)() + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(np, method)(ser) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(df, method)(bool_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(df, method)(bool_only=None) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(np, method)(df, axis=0) + + def test_median_categorical_dtype_nuisance_column(self): + # GH#21020 DataFrame.median should match Series.median + df = DataFrame({"A": Categorical([1, 2, 2, 2, 3])}) + ser = df["A"] + + # Double-check the Series behavior is to raise + with pytest.raises(TypeError, match="does not support reduction"): + ser.median() + + with pytest.raises(TypeError, match="does not support reduction"): + df.median(numeric_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median() + + # same thing, but with an additional non-categorical column + df["B"] = df["A"].astype(int) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median(numeric_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median() + + # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead + # of expected.values + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method): + # GH#28949 DataFrame.min should behave like Series.min + cat = Categorical(["a", "b", "c", "b"], ordered=False) + ser = Series(cat) + df = ser.to_frame("A") + + # Double-check the Series behavior + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(ser, method)() + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(np, method)(ser) + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(df, method)(numeric_only=False) + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(df, method)() + + with pytest.raises(TypeError, match="is 
not ordered for operation"): + getattr(np, method)(df, axis=0) + + # same thing, but with an additional non-categorical column + df["B"] = df["A"].astype(object) + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(df, method)() + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(np, method)(df, axis=0) + + +class TestEmptyDataFrameReductions: + @pytest.mark.parametrize( + "opname, dtype, exp_value, exp_dtype", + [ + ("sum", np.int8, 0, np.int64), + ("prod", np.int8, 1, np.int_), + ("sum", np.int64, 0, np.int64), + ("prod", np.int64, 1, np.int64), + ("sum", np.uint8, 0, np.uint64), + ("prod", np.uint8, 1, np.uint), + ("sum", np.uint64, 0, np.uint64), + ("prod", np.uint64, 1, np.uint64), + ("sum", np.float32, 0, np.float32), + ("prod", np.float32, 1, np.float32), + ("sum", np.float64, 0, np.float64), + ], + ) + def test_df_empty_min_count_0(self, opname, dtype, exp_value, exp_dtype): + df = DataFrame({0: [], 1: []}, dtype=dtype) + result = getattr(df, opname)(min_count=0) + + expected = Series([exp_value, exp_value], dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "opname, dtype, exp_dtype", + [ + ("sum", np.int8, np.float64), + ("prod", np.int8, np.float64), + ("sum", np.int64, np.float64), + ("prod", np.int64, np.float64), + ("sum", np.uint8, np.float64), + ("prod", np.uint8, np.float64), + ("sum", np.uint64, np.float64), + ("prod", np.uint64, np.float64), + ("sum", np.float32, np.float32), + ("prod", np.float32, np.float32), + ("sum", np.float64, np.float64), + ], + ) + def test_df_empty_min_count_1(self, opname, dtype, exp_dtype): + df = DataFrame({0: [], 1: []}, dtype=dtype) + result = getattr(df, opname)(min_count=1) + + expected = Series([np.nan, np.nan], dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "opname, dtype, exp_value, exp_dtype", + [ + ("sum", "Int8", 0, ("Int32" if is_windows_np2_or_is32 else "Int64")), + ("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")), + ("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")), + ("sum", "Int64", 0, "Int64"), + ("prod", "Int64", 1, "Int64"), + ("sum", "UInt8", 0, ("UInt32" if is_windows_np2_or_is32 else "UInt64")), + ("prod", "UInt8", 1, ("UInt32" if is_windows_np2_or_is32 else "UInt64")), + ("sum", "UInt64", 0, "UInt64"), + ("prod", "UInt64", 1, "UInt64"), + ("sum", "Float32", 0, "Float32"), + ("prod", "Float32", 1, "Float32"), + ("sum", "Float64", 0, "Float64"), + ], + ) + def test_df_empty_nullable_min_count_0(self, opname, dtype, exp_value, exp_dtype): + df = DataFrame({0: [], 1: []}, dtype=dtype) + result = getattr(df, opname)(min_count=0) + + expected = Series([exp_value, exp_value], dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + # TODO: why does min_count=1 impact the resulting Windows dtype + # differently than min_count=0? 
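+    # Illustrative sketch, not part of the original suite: the min_count
+    # contract these empty-frame tests pin down. With min_count=0, an empty
+    # sum returns the additive identity and an empty prod the multiplicative
+    # identity; min_count=1 requires at least one valid value, so the result
+    # becomes missing instead.
+    #
+    # >>> import pandas as pd
+    # >>> empty = pd.DataFrame({0: [], 1: []}, dtype="float64")
+    # >>> empty.sum(min_count=0).tolist()
+    # [0.0, 0.0]
+    # >>> empty.prod(min_count=0).tolist()
+    # [1.0, 1.0]
+    # >>> empty.sum(min_count=1).isna().all()
+    # True
+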
+ @pytest.mark.parametrize( + "opname, dtype, exp_dtype", + [ + ("sum", "Int8", ("Int32" if is_windows_or_is32 else "Int64")), + ("prod", "Int8", ("Int32" if is_windows_or_is32 else "Int64")), + ("sum", "Int64", "Int64"), + ("prod", "Int64", "Int64"), + ("sum", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")), + ("prod", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")), + ("sum", "UInt64", "UInt64"), + ("prod", "UInt64", "UInt64"), + ("sum", "Float32", "Float32"), + ("prod", "Float32", "Float32"), + ("sum", "Float64", "Float64"), + ], + ) + def test_df_empty_nullable_min_count_1(self, opname, dtype, exp_dtype): + df = DataFrame({0: [], 1: []}, dtype=dtype) + result = getattr(df, opname)(min_count=1) + + expected = Series([pd.NA, pd.NA], dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + +def test_sum_timedelta64_skipna_false(using_array_manager, request): + # GH#17235 + if using_array_manager: + mark = pytest.mark.xfail( + reason="Incorrect type inference on NaT in reduction result" + ) + request.applymarker(mark) + + arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2) + arr[-1, -1] = "Nat" + + df = DataFrame(arr) + assert (df.dtypes == arr.dtype).all() + + result = df.sum(skipna=False) + expected = Series([pd.Timedelta(seconds=12), pd.NaT], dtype="m8[s]") + tm.assert_series_equal(result, expected) + + result = df.sum(axis=0, skipna=False) + tm.assert_series_equal(result, expected) + + result = df.sum(axis=1, skipna=False) + expected = Series( + [ + pd.Timedelta(seconds=1), + pd.Timedelta(seconds=5), + pd.Timedelta(seconds=9), + pd.NaT, + ], + dtype="m8[s]", + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="sum doesn't work with arrow strings" +) +def test_mixed_frame_with_integer_sum(): + # https://github.com/pandas-dev/pandas/issues/34520 + df = DataFrame([["a", 1]], columns=list("ab")) + df = df.astype({"b": "Int64"}) + result = df.sum() + expected = Series(["a", 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False, None]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_minmax_extensionarray(method, numeric_only): + # https://github.com/pandas-dev/pandas/issues/32651 + int64_info = np.iinfo("int64") + ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype()) + df = DataFrame({"Int64": ser}) + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series( + [getattr(int64_info, method)], + dtype="Int64", + index=Index(["Int64"]), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ts_value", [Timestamp("2000-01-01"), pd.NaT]) +def test_frame_mixed_numeric_object_with_timestamp(ts_value): + # GH 13912 + df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]}) + with pytest.raises(TypeError, match="does not support reduction"): + df.sum() + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"], dtype=object) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False) + + +@pytest.mark.parametrize("method", ["min", "max", "mean", "median", "skew", "kurt"]) +@pytest.mark.parametrize("numeric_only", [True, False]) 
+@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def test_reduction_axis_none_returns_scalar(method, numeric_only, dtype): + # GH#21597 As of 2.0, axis=None reduces over all axes. + + df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), dtype=dtype) + + result = getattr(df, method)(axis=None, numeric_only=numeric_only) + np_arr = df.to_numpy(dtype=np.float64) + if method in {"skew", "kurt"}: + comp_mod = pytest.importorskip("scipy.stats") + if method == "kurt": + method = "kurtosis" + expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None) + tm.assert_almost_equal(result, expected) + else: + expected = getattr(np, method)(np_arr, axis=None) + assert result == expected + + +@pytest.mark.parametrize( + "kernel", + [ + "corr", + "corrwith", + "cov", + "idxmax", + "idxmin", + "kurt", + "max", + "mean", + "median", + "min", + "prod", + "quantile", + "sem", + "skew", + "std", + "sum", + "var", + ], +) +def test_fails_on_non_numeric(kernel): + # GH#46852 + df = DataFrame({"a": [1, 2, 3], "b": object}) + args = (df,) if kernel == "corrwith" else () + msg = "|".join( + [ + "not allowed for this dtype", + "argument must be a string or a number", + "not supported between instances of", + "unsupported operand type", + "argument must be a string or a real number", + ] + ) + if kernel == "median": + # slightly different message on different builds + msg1 = ( + r"Cannot convert \[\[ " + r"\]\] to numeric" + ) + msg2 = ( + r"Cannot convert \[ " + r"\] to numeric" + ) + msg = "|".join([msg1, msg2]) + with pytest.raises(TypeError, match=msg): + getattr(df, kernel)(*args) + + +@pytest.mark.parametrize( + "method", + [ + "all", + "any", + "count", + "idxmax", + "idxmin", + "kurt", + "kurtosis", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + "product", + "sem", + "skew", + "std", + "sum", + "var", + ], +) +@pytest.mark.parametrize("min_count", [0, 2]) +def test_numeric_ea_axis_1(method, skipna, min_count, any_numeric_ea_dtype): + # GH 54341 + df = DataFrame( + { + "a": Series([0, 1, 2, 3], dtype=any_numeric_ea_dtype), + "b": Series([0, 1, pd.NA, 3], dtype=any_numeric_ea_dtype), + }, + ) + expected_df = DataFrame( + { + "a": [0.0, 1.0, 2.0, 3.0], + "b": [0.0, 1.0, np.nan, 3.0], + }, + ) + if method in ("count", "nunique"): + expected_dtype = "int64" + elif method in ("all", "any"): + expected_dtype = "boolean" + elif method in ( + "kurt", + "kurtosis", + "mean", + "median", + "sem", + "skew", + "std", + "var", + ) and not any_numeric_ea_dtype.startswith("Float"): + expected_dtype = "Float64" + else: + expected_dtype = any_numeric_ea_dtype + + kwargs = {} + if method not in ("count", "nunique", "quantile"): + kwargs["skipna"] = skipna + if method in ("prod", "product", "sum"): + kwargs["min_count"] = min_count + + warn = None + msg = None + if not skipna and method in ("idxmax", "idxmin"): + warn = FutureWarning + msg = f"The behavior of DataFrame.{method} with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(df, method)(axis=1, **kwargs) + with tm.assert_produces_warning(warn, match=msg): + expected = getattr(expected_df, method)(axis=1, **kwargs) + if method not in ("idxmax", "idxmin"): + expected = expected.astype(expected_dtype) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_repr.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_repr.py new file mode 100644 index 
0000000000000000000000000000000000000000..776007fb9691d3f6faa185ed037bdd2ab1fd47fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_repr.py @@ -0,0 +1,521 @@ +from datetime import ( + datetime, + timedelta, +) +from io import StringIO + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas import ( + NA, + Categorical, + CategoricalIndex, + DataFrame, + IntervalIndex, + MultiIndex, + NaT, + PeriodIndex, + Series, + Timestamp, + date_range, + option_context, + period_range, +) +import pandas._testing as tm + + +class TestDataFrameRepr: + def test_repr_should_return_str(self): + # https://docs.python.org/3/reference/datamodel.html#object.__repr__ + # "...The return value must be a string object." + + # (str on py2.x, str (unicode) on py3) + + data = [8, 5, 3, 5] + index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"] + cols = ["\u03c8"] + df = DataFrame(data, columns=cols, index=index1) + assert type(df.__repr__()) is str # noqa: E721 + + ser = df[cols[0]] + assert type(ser.__repr__()) is str # noqa: E721 + + def test_repr_bytes_61_lines(self): + # GH#12857 + lets = list("ACDEFGHIJKLMNOP") + words = np.random.default_rng(2).choice(lets, (1000, 50)) + df = DataFrame(words).astype("U1") + assert (df.dtypes == object).all() + + # smoke tests; at one point this raised with 61 but not 60 + repr(df) + repr(df.iloc[:60, :]) + repr(df.iloc[:61, :]) + + def test_repr_unicode_level_names(self, frame_or_series): + index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"]) + + obj = DataFrame(np.random.default_rng(2).standard_normal((2, 4)), index=index) + obj = tm.get_obj(obj, frame_or_series) + repr(obj) + + def test_assign_index_sequences(self): + # GH#2200 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index( + ["a", "b"] + ) + index = list(df.index) + index[0] = ("faz", "boo") + df.index = index + repr(df) + + # this travels an improper code path + index[0] = ["faz", "boo"] + df.index = index + repr(df) + + def test_repr_with_mi_nat(self): + df = DataFrame({"X": [1, 2]}, index=[[NaT, Timestamp("20130101")], ["a", "b"]]) + result = repr(df) + expected = " X\nNaT a 1\n2013-01-01 b 2" + assert result == expected + + def test_repr_with_different_nulls(self): + # GH45263 + df = DataFrame([1, 2, 3, 4], [True, None, np.nan, NaT]) + result = repr(df) + expected = """ 0 +True 1 +None 2 +NaN 3 +NaT 4""" + assert result == expected + + def test_repr_with_different_nulls_cols(self): + # GH45263 + d = {np.nan: [1, 2], None: [3, 4], NaT: [6, 7], True: [8, 9]} + df = DataFrame(data=d) + result = repr(df) + expected = """ NaN None NaT True +0 1 3 6 8 +1 2 4 7 9""" + assert result == expected + + def test_multiindex_na_repr(self): + # only an issue with long columns + df3 = DataFrame( + { + "A" * 30: {("A", "A0006000", "nuit"): "A0006000"}, + "B" * 30: {("A", "A0006000", "nuit"): np.nan}, + "C" * 30: {("A", "A0006000", "nuit"): np.nan}, + "D" * 30: {("A", "A0006000", "nuit"): np.nan}, + "E" * 30: {("A", "A0006000", "nuit"): "A"}, + "F" * 30: {("A", "A0006000", "nuit"): np.nan}, + } + ) + + idf = df3.set_index(["A" * 30, "C" * 30]) + repr(idf) + + def test_repr_name_coincide(self): + index = MultiIndex.from_tuples( + [("a", 0, "foo"), ("b", 1, "bar")], names=["a", "b", "c"] + ) + + df = DataFrame({"value": [0, 1]}, index=index) + + lines = repr(df).split("\n") + assert lines[2].startswith("a 0 foo") + + def test_repr_to_string( + self, + multiindex_year_month_day_dataframe_random_data, + 
multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + repr(frame) + repr(ymd) + repr(frame.T) + repr(ymd.T) + + buf = StringIO() + frame.to_string(buf=buf) + ymd.to_string(buf=buf) + frame.T.to_string(buf=buf) + ymd.T.to_string(buf=buf) + + def test_repr_empty(self): + # empty + repr(DataFrame()) + + # empty with index + frame = DataFrame(index=np.arange(1000)) + repr(frame) + + def test_repr_mixed(self, float_string_frame): + # mixed + repr(float_string_frame) + + @pytest.mark.slow + def test_repr_mixed_big(self): + # big mixed + biggie = DataFrame( + { + "A": np.random.default_rng(2).standard_normal(200), + "B": [str(i) for i in range(200)], + }, + index=range(200), + ) + biggie.loc[:20, "A"] = np.nan + biggie.loc[:20, "B"] = np.nan + + repr(biggie) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="/r in") + def test_repr(self): + # columns but no index + no_index = DataFrame(columns=[0, 1, 3]) + repr(no_index) + + df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"]) + assert "\t" not in repr(df) + assert "\r" not in repr(df) + assert "a\n" not in repr(df) + + def test_repr_dimensions(self): + df = DataFrame([[1, 2], [3, 4]]) + with option_context("display.show_dimensions", True): + assert "2 rows x 2 columns" in repr(df) + + with option_context("display.show_dimensions", False): + assert "2 rows x 2 columns" not in repr(df) + + with option_context("display.show_dimensions", "truncate"): + assert "2 rows x 2 columns" not in repr(df) + + @pytest.mark.slow + def test_repr_big(self): + # big one + biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200)) + repr(biggie) + + def test_repr_unsortable(self): + # columns are not sortable + + unsortable = DataFrame( + { + "foo": [1] * 50, + datetime.today(): [1] * 50, + "bar": ["bar"] * 50, + datetime.today() + timedelta(1): ["bar"] * 50, + }, + index=np.arange(50), + ) + repr(unsortable) + + def test_repr_float_frame_options(self, float_frame): + repr(float_frame) + + with option_context("display.precision", 3): + repr(float_frame) + + with option_context("display.max_rows", 10, "display.max_columns", 2): + repr(float_frame) + + with option_context("display.max_rows", 1000, "display.max_columns", 1000): + repr(float_frame) + + def test_repr_unicode(self): + uval = "\u03c3\u03c3\u03c3\u03c3" + + df = DataFrame({"A": [uval, uval]}) + + result = repr(df) + ex_top = " A" + assert result.split("\n")[0].rstrip() == ex_top + + df = DataFrame({"A": [uval, uval]}) + result = repr(df) + assert result.split("\n")[0].rstrip() == ex_top + + def test_unicode_string_with_unicode(self): + df = DataFrame({"A": ["\u05d0"]}) + str(df) + + def test_repr_unicode_columns(self): + df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]}) + repr(df.columns) # should not raise UnicodeDecodeError + + def test_str_to_bytes_raises(self): + # GH 26447 + df = DataFrame({"A": ["abc"]}) + msg = "^'str' object cannot be interpreted as an integer$" + with pytest.raises(TypeError, match=msg): + bytes(df) + + def test_very_wide_repr(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 20)), + columns=np.array(["a" * 10] * 20, dtype=object), + ) + repr(df) + + def test_repr_column_name_unicode_truncation_bug(self): + # #1906 + df = DataFrame( + { + "Id": [7117434], + "StringCol": ( + "Is it possible to modify drop plot code" + "so that the output graph is displayed " + "in iphone simulator, Is it possible to " + 
"modify drop plot code so that the " + "output graph is \xe2\x80\xa8displayed " + "in iphone simulator.Now we are adding " + "the CSV file externally. I want to Call " + "the File through the code.." + ), + } + ) + + with option_context("display.max_columns", 20): + assert "StringCol" in repr(df) + + def test_latex_repr(self): + pytest.importorskip("jinja2") + expected = r"""\begin{tabular}{llll} +\toprule + & 0 & 1 & 2 \\ +\midrule +0 & $\alpha$ & b & c \\ +1 & 1 & 2 & 3 \\ +\bottomrule +\end{tabular} +""" + with option_context( + "styler.format.escape", None, "styler.render.repr", "latex" + ): + df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]]) + result = df._repr_latex_() + assert result == expected + + # GH 12182 + assert df._repr_latex_() is None + + def test_repr_with_datetimeindex(self): + df = DataFrame({"A": [1, 2, 3]}, index=date_range("2000", periods=3)) + result = repr(df) + expected = " A\n2000-01-01 1\n2000-01-02 2\n2000-01-03 3" + assert result == expected + + def test_repr_with_intervalindex(self): + # https://github.com/pandas-dev/pandas/pull/24134/files + df = DataFrame( + {"A": [1, 2, 3, 4]}, index=IntervalIndex.from_breaks([0, 1, 2, 3, 4]) + ) + result = repr(df) + expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4" + assert result == expected + + def test_repr_with_categorical_index(self): + df = DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) + result = repr(df) + expected = " A\na 1\nb 2\nc 3" + assert result == expected + + def test_repr_categorical_dates_periods(self): + # normal DataFrame + dt = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + p = period_range("2011-01", freq="M", periods=5) + df = DataFrame({"dt": dt, "p": p}) + exp = """ dt p +0 2011-01-01 09:00:00-05:00 2011-01 +1 2011-01-01 10:00:00-05:00 2011-02 +2 2011-01-01 11:00:00-05:00 2011-03 +3 2011-01-01 12:00:00-05:00 2011-04 +4 2011-01-01 13:00:00-05:00 2011-05""" + + assert repr(df) == exp + + df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)}) + assert repr(df2) == exp + + @pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64]) + @pytest.mark.parametrize( + "box, expected", + [[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]], + ) + def test_repr_np_nat_with_object(self, arg, box, expected): + # GH 25445 + result = repr(box([arg("NaT")], dtype=object)) + assert result == expected + + def test_frame_datetime64_pre1900_repr(self): + df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="YE-DEC")}) + # it works! + repr(df) + + def test_frame_to_string_with_periodindex(self): + index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") + frame = DataFrame(np.random.default_rng(2).standard_normal((3, 4)), index=index) + + # it works! 
+        frame.to_string()
+
+    def test_to_string_ea_na_in_multiindex(self):
+        # GH#47986
+        df = DataFrame(
+            {"a": [1, 2]},
+            index=MultiIndex.from_arrays([Series([NA, 1], dtype="Int64")]),
+        )
+
+        result = df.to_string()
+        expected = """      a
+<NA>  1
+1     2"""
+        assert result == expected
+
+    def test_datetime64tz_slice_non_truncate(self):
+        # GH 30263
+        df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")})
+        expected = repr(df)
+        df = df.iloc[:, :5]
+        result = repr(df)
+        assert result == expected
+
+    def test_to_records_no_typeerror_in_repr(self):
+        # GH 48526
+        df = DataFrame([["a", "b"], ["c", "d"], ["e", "f"]], columns=["left", "right"])
+        df["record"] = df[["left", "right"]].to_records()
+        expected = """ left right record
+0 a b [0, a, b]
+1 c d [1, c, d]
+2 e f [2, e, f]"""
+        result = repr(df)
+        assert result == expected
+
+    def test_to_records_with_na_record_value(self):
+        # GH 48526
+        df = DataFrame(
+            [["a", np.nan], ["c", "d"], ["e", "f"]], columns=["left", "right"]
+        )
+        df["record"] = df[["left", "right"]].to_records()
+        expected = """ left right record
+0 a NaN [0, a, nan]
+1 c d [1, c, d]
+2 e f [2, e, f]"""
+        result = repr(df)
+        assert result == expected
+
+    def test_to_records_with_na_record(self):
+        # GH 48526
+        df = DataFrame(
+            [["a", "b"], [np.nan, np.nan], ["e", "f"]], columns=[np.nan, "right"]
+        )
+        df["record"] = df[[np.nan, "right"]].to_records()
+        expected = """ NaN right record
+0 a b [0, a, b]
+1 NaN NaN [1, nan, nan]
+2 e f [2, e, f]"""
+        result = repr(df)
+        assert result == expected
+
+    def test_to_records_with_inf_as_na_record(self):
+        # GH 48526
+        expected = """ NaN inf record
+0 inf b [0, inf, b]
+1 NaN NaN [1, nan, nan]
+2 e f [2, e, f]"""
+        msg = "use_inf_as_na option is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            with option_context("use_inf_as_na", True):
+                df = DataFrame(
+                    [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]],
+                    columns=[np.nan, np.inf],
+                )
+                df["record"] = df[[np.nan, np.inf]].to_records()
+                result = repr(df)
+        assert result == expected
+
+    def test_to_records_with_inf_record(self):
+        # GH 48526
+        expected = """ NaN inf record
+0 inf b [0, inf, b]
+1 NaN NaN [1, nan, nan]
+2 e f [2, e, f]"""
+        msg = "use_inf_as_na option is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            with option_context("use_inf_as_na", False):
+                df = DataFrame(
+                    [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]],
+                    columns=[np.nan, np.inf],
+                )
+                df["record"] = df[[np.nan, np.inf]].to_records()
+                result = repr(df)
+        assert result == expected
+
+    def test_masked_ea_with_formatter(self):
+        # GH#39336
+        df = DataFrame(
+            {
+                "a": Series([0.123456789, 1.123456789], dtype="Float64"),
+                "b": Series([1, 2], dtype="Int64"),
+            }
+        )
+        result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format])
+        expected = """ a b
+0 0.12 1.00
+1 1.12 2.00"""
+        assert result == expected
+
+    def test_repr_ea_columns(self, any_string_dtype):
+        # GH#54797
+        pytest.importorskip("pyarrow")
+        df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]})
+        df.columns = df.columns.astype(any_string_dtype)
+        expected = """ long_column_name col2
+0 1 4
+1 2 5
+2 3 6"""
+        assert repr(df) == expected
+
+
+@pytest.mark.parametrize(
+    "data,output",
+    [
+        ([2, complex("nan"), 1], [" 2.0+0.0j", " NaN+0.0j", " 1.0+0.0j"]),
+        ([2, complex("nan"), -1], [" 2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),
+        ([-2, complex("nan"), -1], ["-2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]),
+        ([-1.23j, complex("nan"), -1], ["-0.00-1.23j", " NaN+0.00j", "-1.00+0.00j"]),
+
([1.23j, complex("nan"), 1.23], [" 0.00+1.23j", " NaN+0.00j", " 1.23+0.00j"]), + ( + [-1.23j, complex(np.nan, np.nan), 1], + ["-0.00-1.23j", " NaN+ NaNj", " 1.00+0.00j"], + ), + ( + [-1.23j, complex(1.2, np.nan), 1], + ["-0.00-1.23j", " 1.20+ NaNj", " 1.00+0.00j"], + ), + ( + [-1.23j, complex(np.nan, -1.2), 1], + ["-0.00-1.23j", " NaN-1.20j", " 1.00+0.00j"], + ), + ], +) +@pytest.mark.parametrize("as_frame", [True, False]) +def test_repr_with_complex_nans(data, output, as_frame): + # GH#53762, GH#53841 + obj = Series(np.array(data)) + if as_frame: + obj = obj.to_frame(name="val") + reprs = [f"{i} {val}" for i, val in enumerate(output)] + expected = f"{'val': >{len(reprs[0])}}\n" + "\n".join(reprs) + else: + reprs = [f"{i} {val}" for i, val in enumerate(output)] + expected = "\n".join(reprs) + "\ndtype: complex128" + assert str(obj) == expected, f"\n{str(obj)}\n\n{expected}" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_stack_unstack.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_stack_unstack.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b92091260a3b0fbde9ddd99e46d31d1fdbf320 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_stack_unstack.py @@ -0,0 +1,2678 @@ +from datetime import datetime +import itertools +import re + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + Timedelta, + date_range, +) +import pandas._testing as tm +from pandas.core.reshape import reshape as reshape_lib + + +@pytest.fixture(params=[True, False]) +def future_stack(request): + return request.param + + +class TestDataFrameReshape: + def test_stack_unstack(self, float_frame, future_stack): + df = float_frame.copy() + df[:] = np.arange(np.prod(df.shape)).reshape(df.shape) + + stacked = df.stack(future_stack=future_stack) + stacked_df = DataFrame({"foo": stacked, "bar": stacked}) + + unstacked = stacked.unstack() + unstacked_df = stacked_df.unstack() + + tm.assert_frame_equal(unstacked, df) + tm.assert_frame_equal(unstacked_df["bar"], df) + + unstacked_cols = stacked.unstack(0) + unstacked_cols_df = stacked_df.unstack(0) + tm.assert_frame_equal(unstacked_cols.T, df) + tm.assert_frame_equal(unstacked_cols_df["bar"].T, df) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_mixed_level(self, future_stack): + # GH 18310 + levels = [range(3), [3, "a", "b"], [1, 2]] + + # flat columns: + df = DataFrame(1, index=levels[0], columns=levels[1]) + result = df.stack(future_stack=future_stack) + expected = Series(1, index=MultiIndex.from_product(levels[:2])) + tm.assert_series_equal(result, expected) + + # MultiIndex columns: + df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:])) + result = df.stack(1, future_stack=future_stack) + expected = DataFrame( + 1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1] + ) + tm.assert_frame_equal(result, expected) + + # as above, but used labels in level are actually of homogeneous type + result = df[["a", "b"]].stack(1, future_stack=future_stack) + expected = expected[["a", "b"]] + tm.assert_frame_equal(result, expected) + + def test_unstack_not_consolidated(self, using_array_manager): + # Gh#34708 + df = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]}) + df2 = df[["x"]] + df2["y"] = df["y"] + if not 
using_array_manager: + assert len(df2._mgr.blocks) == 2 + + res = df2.unstack() + expected = df.unstack() + tm.assert_series_equal(res, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_unstack_fill(self, future_stack): + # GH #9746: fill_value keyword argument for Series + # and DataFrame unstack + + # From a series + data = Series([1, 2, 4, 5], dtype=np.int16) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack(fill_value=-1) + expected = DataFrame( + {"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16 + ) + tm.assert_frame_equal(result, expected) + + # From a series with incorrect data type for fill_value + result = data.unstack(fill_value=0.5) + expected = DataFrame( + {"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float + ) + tm.assert_frame_equal(result, expected) + + # GH #13971: fill_value when unstacking multiple levels: + df = DataFrame( + {"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]} + ).set_index(["x", "y", "z"]) + unstacked = df.unstack(["x", "y"], fill_value=0) + key = ("w", "b", "j") + expected = unstacked[key] + result = Series([0, 0, 2], index=unstacked.index, name=key) + tm.assert_series_equal(result, expected) + + stacked = unstacked.stack(["x", "y"], future_stack=future_stack) + stacked.index = stacked.index.reorder_levels(df.index.names) + # Workaround for GH #17886 (unnecessarily casts to float): + stacked = stacked.astype(np.int64) + result = stacked.loc[df.index] + tm.assert_frame_equal(result, df) + + # From a series + s = df["w"] + result = s.unstack(["x", "y"], fill_value=0) + expected = unstacked["w"] + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame(self): + # From a dataframe + rows = [[1, 2], [3, 4], [5, 6], [7, 8]] + df = DataFrame(rows, columns=list("AB"), dtype=np.int32) + df.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = df.unstack(fill_value=-1) + + rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]] + expected = DataFrame(rows, index=list("xyz"), dtype=np.int32) + expected.columns = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")] + ) + tm.assert_frame_equal(result, expected) + + # From a mixed type dataframe + df["A"] = df["A"].astype(np.int16) + df["B"] = df["B"].astype(np.float64) + + result = df.unstack(fill_value=-1) + expected["A"] = expected["A"].astype(np.int16) + expected["B"] = expected["B"].astype(np.float64) + tm.assert_frame_equal(result, expected) + + # From a dataframe with incorrect data type for fill_value + result = df.unstack(fill_value=0.5) + + rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]] + expected = DataFrame(rows, index=list("xyz"), dtype=float) + expected.columns = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")] + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_datetime(self): + # Test unstacking with date times + dv = date_range("2012-01-01", periods=4).values + data = Series(dv) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=dv[0]) + expected = DataFrame( + {"a": [dv[0], dv[0], dv[3]], 
"b": [dv[1], dv[2], dv[0]]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_timedelta(self): + # Test unstacking with time deltas + td = [Timedelta(days=i) for i in range(4)] + data = Series(td) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=td[1]) + expected = DataFrame( + {"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_period(self): + # Test unstacking with period + periods = [ + Period("2012-01"), + Period("2012-02"), + Period("2012-03"), + Period("2012-04"), + ] + data = Series(periods) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=periods[1]) + expected = DataFrame( + { + "a": [periods[0], periods[1], periods[3]], + "b": [periods[1], periods[2], periods[1]], + }, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_categorical(self): + # Test unstacking with categorical + data = Series(["a", "b", "c", "a"], dtype="category") + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + # By default missing values will be NaN + result = data.unstack() + expected = DataFrame( + { + "a": pd.Categorical(list("axa"), categories=list("abc")), + "b": pd.Categorical(list("bcx"), categories=list("abc")), + }, + index=list("xyz"), + ) + tm.assert_frame_equal(result, expected) + + # Fill with non-category results in a ValueError + msg = r"Cannot setitem on a Categorical with a new category \(d\)" + with pytest.raises(TypeError, match=msg): + data.unstack(fill_value="d") + + # Fill with category value replaces missing values as expected + result = data.unstack(fill_value="c") + expected = DataFrame( + { + "a": pd.Categorical(list("aca"), categories=list("abc")), + "b": pd.Categorical(list("bcc"), categories=list("abc")), + }, + index=list("xyz"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_tuplename_in_multiindex(self): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] + ) + df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx) + result = df.unstack(("A", "a")) + + expected = DataFrame( + [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]], + columns=MultiIndex.from_tuples( + [ + ("d", "a"), + ("d", "b"), + ("d", "c"), + ("e", "a"), + ("e", "b"), + ("e", "c"), + ], + names=[None, ("A", "a")], + ), + index=Index([1, 2, 3], name=("B", "b")), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "unstack_idx, expected_values, expected_index, expected_columns", + [ + ( + ("A", "a"), + [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]], + MultiIndex.from_tuples( + [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"] + ), + MultiIndex.from_tuples( + [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")], + names=[None, ("A", "a")], + ), + ), + ( + (("A", "a"), "B"), + [[1, 1, 1, 1, 2, 2, 2, 2], 
[1, 1, 1, 1, 2, 2, 2, 2]], + Index([3, 4], name="C"), + MultiIndex.from_tuples( + [ + ("d", "a", 1), + ("d", "a", 2), + ("d", "b", 1), + ("d", "b", 2), + ("e", "a", 1), + ("e", "a", 2), + ("e", "b", 1), + ("e", "b", 2), + ], + names=[None, ("A", "a"), "B"], + ), + ), + ], + ) + def test_unstack_mixed_type_name_in_multiindex( + self, unstack_idx, expected_values, expected_index, expected_columns + ): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] + ) + df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx) + result = df.unstack(unstack_idx) + + expected = DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_preserve_dtypes(self): + # Checks fix for #11847 + df = DataFrame( + { + "state": ["IL", "MI", "NC"], + "index": ["a", "b", "c"], + "some_categories": Series(["a", "b", "c"]).astype("category"), + "A": np.random.default_rng(2).random(3), + "B": 1, + "C": "foo", + "D": pd.Timestamp("20010102"), + "E": Series([1.0, 50.0, 100.0]).astype("float32"), + "F": Series([3.0, 4.0, 5.0]).astype("float64"), + "G": False, + "H": Series([1, 200, 923442]).astype("int8"), + } + ) + + def unstack_and_compare(df, column_name): + unstacked1 = df.unstack([column_name]) + unstacked2 = df.unstack(column_name) + tm.assert_frame_equal(unstacked1, unstacked2) + + df1 = df.set_index(["state", "index"]) + unstack_and_compare(df1, "index") + + df1 = df.set_index(["state", "some_categories"]) + unstack_and_compare(df1, "some_categories") + + df1 = df.set_index(["F", "C"]) + unstack_and_compare(df1, "F") + + df1 = df.set_index(["G", "B", "state"]) + unstack_and_compare(df1, "B") + + df1 = df.set_index(["E", "A"]) + unstack_and_compare(df1, "E") + + df1 = df.set_index(["state", "index"]) + s = df1["A"] + unstack_and_compare(s, "index") + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_ints(self, future_stack): + columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3))) + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 27)), columns=columns + ) + + tm.assert_frame_equal( + df.stack(level=[1, 2], future_stack=future_stack), + df.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + tm.assert_frame_equal( + df.stack(level=[-2, -1], future_stack=future_stack), + df.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + + df_named = df.copy() + return_value = df_named.columns.set_names(range(3), inplace=True) + assert return_value is None + + tm.assert_frame_equal( + df_named.stack(level=[1, 2], future_stack=future_stack), + df_named.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_mixed_levels(self, future_stack): + columns = MultiIndex.from_tuples( + [ + ("A", "cat", "long"), + ("B", "cat", "long"), + ("A", "dog", "short"), + ("B", "dog", "short"), + ], + names=["exp", "animal", "hair_length"], + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=columns + ) + + animal_hair_stacked = df.stack( + level=["animal", "hair_length"], future_stack=future_stack + ) + exp_hair_stacked = df.stack( + level=["exp", "hair_length"], future_stack=future_stack + ) + + # GH #8584: Need to check that stacking works when 
a number + # is passed that is both a level name and in the range of + # the level numbers + df2 = df.copy() + df2.columns.names = ["exp", "animal", 1] + tm.assert_frame_equal( + df2.stack(level=["animal", 1], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=["exp", 1], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + # When mixed types are passed and the ints are not level + # names, raise + msg = ( + "level should contain all level names or all level numbers, not " + "a mixture of the two" + ) + with pytest.raises(ValueError, match=msg): + df2.stack(level=["animal", 0], future_stack=future_stack) + + # GH #8584: Having 0 in the level names could raise a + # strange error about lexsort depth + df3 = df.copy() + df3.columns.names = ["exp", "animal", 0] + tm.assert_frame_equal( + df3.stack(level=["animal", 0], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_int_level_names(self, future_stack): + columns = MultiIndex.from_tuples( + [ + ("A", "cat", "long"), + ("B", "cat", "long"), + ("A", "dog", "short"), + ("B", "dog", "short"), + ], + names=["exp", "animal", "hair_length"], + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=columns + ) + + exp_animal_stacked = df.stack( + level=["exp", "animal"], future_stack=future_stack + ) + animal_hair_stacked = df.stack( + level=["animal", "hair_length"], future_stack=future_stack + ) + exp_hair_stacked = df.stack( + level=["exp", "hair_length"], future_stack=future_stack + ) + + df2 = df.copy() + df2.columns.names = [0, 1, 2] + tm.assert_frame_equal( + df2.stack(level=[1, 2], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=[0, 1], future_stack=future_stack), + exp_animal_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=[0, 2], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + # Out-of-order int column names + df3 = df.copy() + df3.columns.names = [2, 0, 1] + tm.assert_frame_equal( + df3.stack(level=[0, 1], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df3.stack(level=[2, 0], future_stack=future_stack), + exp_animal_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df3.stack(level=[2, 1], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + def test_unstack_bool(self): + df = DataFrame( + [False, False], + index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]), + columns=["col"], + ) + rs = df.unstack() + xp = DataFrame( + np.array([[False, np.nan], [np.nan, False]], dtype=object), + index=["a", "b"], + columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]), + ) + tm.assert_frame_equal(rs, xp) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_unstack_level_binding(self, future_stack): + # GH9856 + mi = MultiIndex( + levels=[["foo", "bar"], ["one", "two"], ["a", "b"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]], + names=["first", "second", "third"], + ) + s = Series(0, index=mi) + result = s.unstack([1, 2]).stack(0, future_stack=future_stack) + + expected_mi = MultiIndex( + levels=[["foo", "bar"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["first", "second"], + ) + + expected = 
DataFrame( + np.array( + [[0, np.nan], [np.nan, 0], [0, np.nan], [np.nan, 0]], dtype=np.float64 + ), + index=expected_mi, + columns=Index(["b", "a"], name="third"), + ) + + tm.assert_frame_equal(result, expected) + + def test_unstack_to_series(self, float_frame): + # check reversibility + data = float_frame.unstack() + + assert isinstance(data, Series) + undo = data.unstack().T + tm.assert_frame_equal(undo, float_frame) + + # check NA handling + data = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]}) + data.index = Index(["a", "b", "c"]) + result = data.unstack() + + midx = MultiIndex( + levels=[["x", "y"], ["a", "b", "c"]], + codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], + ) + expected = Series([1, 2, np.nan, 3, 4, np.nan], index=midx) + + tm.assert_series_equal(result, expected) + + # check composability of unstack + old_data = data.copy() + for _ in range(4): + data = data.unstack() + tm.assert_frame_equal(old_data, data) + + def test_unstack_dtypes(self, using_infer_string): + # GH 2929 + rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]] + + df = DataFrame(rows, columns=list("ABCD")) + result = df.dtypes + expected = Series([np.dtype("int64")] * 4, index=list("ABCD")) + tm.assert_series_equal(result, expected) + + # single dtype + df2 = df.set_index(["A", "B"]) + df3 = df2.unstack("B") + result = df3.dtypes + expected = Series( + [np.dtype("int64")] * 4, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), + ) + tm.assert_series_equal(result, expected) + + # mixed + df2 = df.set_index(["A", "B"]) + df2["C"] = 3.0 + df3 = df2.unstack("B") + result = df3.dtypes + expected = Series( + [np.dtype("float64")] * 2 + [np.dtype("int64")] * 2, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), + ) + tm.assert_series_equal(result, expected) + df2["D"] = "foo" + df3 = df2.unstack("B") + result = df3.dtypes + dtype = "string" if using_infer_string else np.dtype("object") + expected = Series( + [np.dtype("float64")] * 2 + [dtype] * 2, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "c, d", + ( + (np.zeros(5), np.zeros(5)), + (np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")), + ), + ) + def test_unstack_dtypes_mixed_date(self, c, d): + # GH7405 + df = DataFrame( + { + "A": ["a"] * 5, + "C": c, + "D": d, + "B": date_range("2012-01-01", periods=5), + } + ) + + right = df.iloc[:3].copy(deep=True) + + df = df.set_index(["A", "B"]) + df["D"] = df["D"].astype("int64") + + left = df.iloc[:3].unstack(0) + right = right.set_index(["A", "B"]).unstack(0) + right[("D", "a")] = right[("D", "a")].astype("int64") + + assert left.shape == (3, 2) + tm.assert_frame_equal(left, right) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_unstack_non_unique_index_names(self, future_stack): + idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"]) + df = DataFrame([1, 2], index=idx) + msg = "The name c1 occurs multiple times, use a level number" + with pytest.raises(ValueError, match=msg): + df.unstack("c1") + + with pytest.raises(ValueError, match=msg): + df.T.stack("c1", future_stack=future_stack) + + def test_unstack_unused_levels(self): + # GH 17845: unused codes in index make unstack() cast int to float + idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1] + df = DataFrame([[1, 0]] * 3, 
index=idx) + + result = df.unstack() + exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]]) + expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col) + tm.assert_frame_equal(result, expected) + assert (result.columns.levels[1] == idx.levels[1]).all() + + # Unused items on both levels + levels = [[0, 1, 7], [0, 1, 2, 3]] + codes = [[0, 0, 1, 1], [0, 2, 0, 2]] + idx = MultiIndex(levels, codes) + block = np.arange(4).reshape(2, 2) + df = DataFrame(np.concatenate([block, block + 4]), index=idx) + result = df.unstack() + expected = DataFrame( + np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx + ) + tm.assert_frame_equal(result, expected) + assert (result.columns.levels[1] == idx.levels[1]).all() + + @pytest.mark.parametrize( + "level, idces, col_level, idx_level", + ( + (0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]), + (1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]), + ), + ) + def test_unstack_unused_levels_mixed_with_nan( + self, level, idces, col_level, idx_level + ): + # With mixed dtype and NaN + levels = [["a", 2, "c"], [1, 3, 5, 7]] + codes = [[0, -1, 1, 1], [0, 2, -1, 2]] + idx = MultiIndex(levels, codes) + data = np.arange(8) + df = DataFrame(data.reshape(4, 2), index=idx) + + result = df.unstack(level=level) + exp_data = np.zeros(18) * np.nan + exp_data[idces] = data + cols = MultiIndex.from_product([[0, 1], col_level]) + expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("cols", [["A", "C"], slice(None)]) + def test_unstack_unused_level(self, cols): + # GH 18562 : unused codes on the unstacked level + df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"]) + + ind = df.set_index(["A", "B", "C"], drop=False) + selection = ind.loc[(slice(None), slice(None), "I"), cols] + result = selection.unstack() + + expected = ind.iloc[[0]][cols] + expected.columns = MultiIndex.from_product( + [expected.columns, ["I"]], names=[None, "C"] + ) + expected.index = expected.index.droplevel("C") + tm.assert_frame_equal(result, expected) + + def test_unstack_long_index(self): + # PH 32624: Error when using a lot of indices to unstack. + # The error occurred only, if a lot of indices are used. 
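+        # Unstacking six of the seven index levels at once should give the
+        # 1x1 frame constructed below.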
+ df = DataFrame( + [[1]], + columns=MultiIndex.from_tuples([[0]], names=["c1"]), + index=MultiIndex.from_tuples( + [[0, 0, 1, 0, 0, 0, 1]], + names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"], + ), + ) + result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"]) + expected = DataFrame( + [[1]], + columns=MultiIndex.from_tuples( + [[0, 0, 1, 0, 0, 0, 1]], + names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"], + ), + index=Index([0], name="i1"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_multi_level_cols(self): + # GH 24729: Unstack a df with multi level columns + df = DataFrame( + [[0.0, 0.0], [0.0, 0.0]], + columns=MultiIndex.from_tuples( + [["B", "C"], ["B", "D"]], names=["c1", "c2"] + ), + index=MultiIndex.from_tuples( + [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"] + ), + ) + assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"] + + def test_unstack_multi_level_rows_and_cols(self): + # GH 28306: Unstack df with multi level cols and rows + df = DataFrame( + [[1, 2], [3, 4], [-1, -2], [-3, -4]], + columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]), + index=MultiIndex.from_tuples( + [ + ["m1", "P3", 222], + ["m1", "A5", 111], + ["m2", "P3", 222], + ["m2", "A5", 111], + ], + names=["i1", "i2", "i3"], + ), + ) + result = df.unstack(["i3", "i2"]) + expected = df.unstack(["i3"]).unstack(["i2"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("idx", [("jim", "joe"), ("joe", "jim")]) + @pytest.mark.parametrize("lev", list(range(2))) + def test_unstack_nan_index1(self, idx, lev): + # GH7466 + def cast(val): + val_str = "" if val != val else val # NaN check: NaN compares unequal to itself + return f"{val_str:1}" + + df = DataFrame( + { + "jim": ["a", "b", np.nan, "d"], + "joe": ["w", "x", "y", "z"], + "jolie": ["a.w", "b.x", " .y", "d.z"], + } + ) + + left = df.set_index(["jim", "joe"]).unstack()["jolie"] + right = df.set_index(["joe", "jim"]).unstack()["jolie"].T + tm.assert_frame_equal(left, right) + + mi = df.set_index(list(idx)) + udf = mi.unstack(level=lev) + assert udf.notna().values.sum() == len(df) + mk_list = lambda a: list(a) if isinstance(a, tuple) else [a] + rows, cols = udf["jolie"].notna().values.nonzero() + for i, j in zip(rows, cols): + left = sorted(udf["jolie"].iloc[i, j].split(".")) + right = mk_list(udf["jolie"].index[i]) + mk_list(udf["jolie"].columns[j]) + right = sorted(map(cast, right)) + assert left == right + + @pytest.mark.parametrize("idx", itertools.permutations(["1st", "2nd", "3rd"])) + @pytest.mark.parametrize("lev", list(range(3))) + @pytest.mark.parametrize("col", ["4th", "5th"]) + def test_unstack_nan_index_repeats(self, idx, lev, col): + def cast(val): + val_str = "" if val != val else val + return f"{val_str:1}" + + df = DataFrame( + { + "1st": ["d"] * 3 + + [np.nan] * 5 + + ["a"] * 2 + + ["c"] * 3 + + ["e"] * 2 + + ["b"] * 5, + "2nd": ["y"] * 2 + + ["w"] * 3 + + [np.nan] * 3 + + ["z"] * 4 + + [np.nan] * 3 + + ["x"] * 3 + + [np.nan] * 2, + "3rd": [ + 67, + 39, + 53, + 72, + 57, + 80, + 31, + 18, + 11, + 30, + 59, + 50, + 62, + 59, + 76, + 52, + 14, + 53, + 60, + 51, + ], + } + ) + + df["4th"], df["5th"] = ( + df.apply(lambda r: ".".join(map(cast, r)), axis=1), + df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1), + ) + + mi = df.set_index(list(idx)) + udf = mi.unstack(level=lev) + assert udf.notna().values.sum() == 2 * len(df) + mk_list = lambda a: list(a) if isinstance(a, tuple) else [a] + rows, cols = udf[col].notna().values.nonzero() + for i, j in zip(rows, cols): + left = sorted(udf[col].iloc[i, j].split("."))
+ right = mk_list(udf[col].index[i]) + mk_list(udf[col].columns[j]) + right = sorted(map(cast, right)) + assert left == right + + def test_unstack_nan_index2(self): + # GH7403 + df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [ + [3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7], + ] + vals = list(map(list, zip(*vals))) + idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B") + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[2, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]] + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + idx = Index([np.nan, 0, 1, 2, 3], name="B") + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]] + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + idx = Index([np.nan, 0, 1, 2, 3], name="B") + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + def test_unstack_nan_index3(self, using_array_manager): + # GH7401 + df = DataFrame( + { + "A": list("aaaaabbbbb"), + "B": (date_range("2012-01-01", periods=5).tolist() * 2), + "C": np.arange(10), + } + ) + + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack() + + vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]]) + idx = Index(["a", "b"], name="A") + cols = MultiIndex( + levels=[["C"], date_range("2012-01-01", periods=5)], + codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], + names=[None, "B"], + ) + + right = DataFrame(vals, columns=cols, index=idx) + if using_array_manager: + # INFO(ArrayManager) with ArrayManager preserve dtype where possible + cols = right.columns[[1, 2, 3, 5]] + right[cols] = right[cols].astype(df["C"].dtype) + tm.assert_frame_equal(left, right) + + def test_unstack_nan_index4(self): + # GH4862 + vals = [ + ["Hg", np.nan, np.nan, 680585148], + ["U", 0.0, np.nan, 680585148], + ["Pb", 7.07e-06, np.nan, 680585148], + ["Sn", 2.3614e-05, 0.0133, 680607017], + ["Ag", 0.0, 0.0133, 680607017], + ["Hg", -0.00015, 0.0133, 680607017], + ] + df = DataFrame( + vals, + columns=["agent", "change", "dosage", "s_id"], + index=[17263, 17264, 17265, 17266, 17267, 17268], + ) + + left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack() + + vals = [ + [np.nan, np.nan, 7.07e-06, np.nan, 0.0], + [0.0, -0.00015, np.nan, 2.3614e-05, np.nan], + ] + + idx = MultiIndex( + levels=[[680585148, 680607017], [0.0133]], + codes=[[0, 1], [-1, 0]], + names=["s_id", "dosage"], + ) + + cols = MultiIndex( + levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]], + codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]], + 
names=[None, "agent"], + ) + + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"]) + tm.assert_frame_equal(left.unstack(), right) + + def test_unstack_nan_index5(self): + # GH9497 - multiple unstack with nulls + df = DataFrame( + { + "1st": [1, 2, 1, 2, 1, 2], + "2nd": date_range("2014-02-01", periods=6, freq="D"), + "jim": 100 + np.arange(6), + "joe": (np.random.default_rng(2).standard_normal(6) * 10).round(2), + } + ) + + df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02") + df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan + df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan + + left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"]) + assert left.notna().values.sum() == 2 * len(df) + + for col in ["jim", "joe"]: + for _, r in df.iterrows(): + key = r["1st"], (col, r["2nd"], r["3rd"]) + assert r[col] == left.loc[key] + + def test_stack_datetime_column_multiIndex(self, future_stack): + # GH 8039 + t = datetime(2014, 1, 1) + df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")])) + warn = None if future_stack else FutureWarning + msg = "The previous implementation of stack is deprecated" + with tm.assert_produces_warning(warn, match=msg): + result = df.stack(future_stack=future_stack) + + eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)]) + ecols = MultiIndex.from_tuples([(t, "A")]) + expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize( + "multiindex_columns", + [ + [0, 1, 2, 3, 4], + [0, 1, 2, 3], + [0, 1, 2, 4], + [0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [0, 1], + [0, 2], + [0, 3], + [0], + [2], + [4], + [4, 3, 2, 1, 0], + [3, 2, 1, 0], + [4, 2, 1, 0], + [2, 1, 0], + [3, 2, 1], + [4, 3, 2], + [1, 0], + [2, 0], + [3, 0], + ], + ) + @pytest.mark.parametrize("level", (-1, 0, 1, [0, 1], [1, 0])) + def test_stack_partial_multiIndex(self, multiindex_columns, level, future_stack): + # GH 8844 + dropna = False if not future_stack else lib.no_default + full_multiindex = MultiIndex.from_tuples( + [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")], + names=["Upper", "Lower"], + ) + multiindex = full_multiindex[multiindex_columns] + df = DataFrame( + np.arange(3 * len(multiindex)).reshape(3, len(multiindex)), + columns=multiindex, + ) + result = df.stack(level=level, dropna=dropna, future_stack=future_stack) + + if isinstance(level, int) and not future_stack: + # Stacking a single level should not make any all-NaN rows, + # so df.stack(level=level, dropna=False) should be the same + # as df.stack(level=level, dropna=True). 
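+ # The check below verifies that equivalence directly, then repeats it after + # reconstructing the columns from raw tuples.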
+ expected = df.stack(level=level, dropna=True, future_stack=future_stack) + if isinstance(expected, Series): + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + df.columns = MultiIndex.from_tuples( + df.columns.to_numpy(), names=df.columns.names + ) + expected = df.stack(level=level, dropna=dropna, future_stack=future_stack) + if isinstance(expected, Series): + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_full_multiIndex(self, future_stack): + # GH 8844 + full_multiindex = MultiIndex.from_tuples( + [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")], + names=["Upper", "Lower"], + ) + df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]]) + dropna = False if not future_stack else lib.no_default + result = df.stack(dropna=dropna, future_stack=future_stack) + expected = DataFrame( + [[0, 2], [1, np.nan], [3, 5], [4, np.nan]], + index=MultiIndex( + levels=[[0, 1], ["u", "x", "y", "z"]], + codes=[[0, 0, 1, 1], [1, 3, 1, 3]], + names=[None, "Lower"], + ), + columns=Index(["B", "C"], name="Upper"), + ) + expected["B"] = expected["B"].astype(df.dtypes.iloc[0]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [False, True]) + def test_stack_preserve_categorical_dtype(self, ordered, future_stack): + # GH13854 + cidx = pd.CategoricalIndex(list("yxz"), categories=list("xyz"), ordered=ordered) + df = DataFrame([[10, 11, 12]], columns=cidx) + result = df.stack(future_stack=future_stack) + + # `MultiIndex.from_product` preserves categorical dtype - + # it's tested elsewhere. + midx = MultiIndex.from_product([df.index, cidx]) + expected = Series([10, 11, 12], index=midx) + + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize("ordered", [False, True]) + @pytest.mark.parametrize( + "labels,data", + [ + (list("xyz"), [10, 11, 12, 13, 14, 15]), + (list("zyx"), [14, 15, 12, 13, 10, 11]), + ], + ) + def test_stack_multi_preserve_categorical_dtype( + self, ordered, labels, data, future_stack + ): + # GH-36991 + cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered) + cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered) + midx = MultiIndex.from_product([cidx, cidx2]) + df = DataFrame([sorted(data)], columns=midx) + result = df.stack([0, 1], future_stack=future_stack) + + labels = labels if future_stack else sorted(labels) + s_cidx = pd.CategoricalIndex(labels, ordered=ordered) + expected_data = sorted(data) if future_stack else data + expected = Series( + expected_data, index=MultiIndex.from_product([[0], s_cidx, cidx2]) + ) + + tm.assert_series_equal(result, expected) + + def test_stack_preserve_categorical_dtype_values(self, future_stack): + # GH-23077 + cat = pd.Categorical(["a", "a", "b", "c"]) + df = DataFrame({"A": cat, "B": cat}) + result = df.stack(future_stack=future_stack) + index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]]) + expected = Series( + pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") + @pytest.mark.parametrize( + "index, 
columns", + [ + ([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ], + ) + def test_stack_multi_columns_non_unique_index(self, index, columns, future_stack): + # GH-28301 + + df = DataFrame(index=index, columns=columns).fillna(1) + stacked = df.stack(future_stack=future_stack) + new_index = MultiIndex.from_tuples(stacked.index.to_numpy()) + expected = DataFrame( + stacked.to_numpy(), index=new_index, columns=stacked.columns + ) + tm.assert_frame_equal(stacked, expected) + stacked_codes = np.asarray(stacked.index.codes) + expected_codes = np.asarray(new_index.codes) + tm.assert_numpy_array_equal(stacked_codes, expected_codes) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize( + "vals1, vals2, dtype1, dtype2, expected_dtype", + [ + ([1, 2], [3.0, 4.0], "Int64", "Float64", "Float64"), + ([1, 2], ["foo", "bar"], "Int64", "string", "object"), + ], + ) + def test_stack_multi_columns_mixed_extension_types( + self, vals1, vals2, dtype1, dtype2, expected_dtype, future_stack + ): + # GH45740 + df = DataFrame( + { + ("A", 1): Series(vals1, dtype=dtype1), + ("A", 2): Series(vals2, dtype=dtype2), + } + ) + result = df.stack(future_stack=future_stack) + expected = ( + df.astype(object).stack(future_stack=future_stack).astype(expected_dtype) + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("level", [0, 1]) + def test_unstack_mixed_extension_types(self, level): + index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"]) + df = DataFrame( + { + "A": pd.array([0, 1, None], dtype="Int64"), + "B": pd.Categorical(["a", "a", "b"]), + }, + index=index, + ) + + result = df.unstack(level=level) + expected = df.astype(object).unstack(level=level) + if level == 0: + expected[("A", "B")] = expected[("A", "B")].fillna(pd.NA) + else: + expected[("A", 0)] = expected[("A", 0)].fillna(pd.NA) + + expected_dtypes = Series( + [df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns + ) + tm.assert_series_equal(result.dtypes, expected_dtypes) + tm.assert_frame_equal(result.astype(object), expected) + + @pytest.mark.parametrize("level", [0, "baz"]) + def test_unstack_swaplevel_sortlevel(self, level): + # GH 20994 + mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"]) + df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"]) + df.columns.name = "foo" + + expected = DataFrame( + [[3, 1, 2, 0]], + columns=MultiIndex.from_tuples( + [("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"] + ), + ) + expected.index.name = "bar" + + result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def test_unstack_sort_false(frame_or_series, dtype): + # GH 15105 + index = MultiIndex.from_tuples( + [("two", "z", "b"), ("two", "y", "a"), ("one", "z", "b"), ("one", "y", "a")] + ) + obj = frame_or_series(np.arange(1.0, 5.0), index=index, dtype=dtype) + result = obj.unstack(level=-1, sort=False) + + if frame_or_series is DataFrame: + expected_columns = MultiIndex.from_tuples([(0, "b"), (0, "a")]) + else: + expected_columns = ["b", "a"] + expected = DataFrame( + [[1.0, np.nan], [np.nan, 2.0], [3.0, np.nan], [np.nan, 4.0]], + columns=expected_columns, + index=MultiIndex.from_tuples( + [("two", "z"), ("two", "y"), ("one", "z"), 
("one", "y")] + ), + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + result = obj.unstack(level=[1, 2], sort=False) + + if frame_or_series is DataFrame: + expected_columns = MultiIndex.from_tuples([(0, "z", "b"), (0, "y", "a")]) + else: + expected_columns = MultiIndex.from_tuples([("z", "b"), ("y", "a")]) + expected = DataFrame( + [[1.0, 2.0], [3.0, 4.0]], + index=["two", "one"], + columns=expected_columns, + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_unstack_fill_frame_object(): + # GH12815 Test unstacking with object. + data = Series(["a", "b", "c", "a"], dtype="object") + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + # By default missing values will be NaN + result = data.unstack() + expected = DataFrame( + {"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, + index=list("xyz"), + dtype=object, + ) + tm.assert_frame_equal(result, expected) + + # Fill with any value replaces missing values as expected + result = data.unstack(fill_value="d") + expected = DataFrame( + {"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz"), dtype=object + ) + tm.assert_frame_equal(result, expected) + + +def test_unstack_timezone_aware_values(): + # GH 18338 + df = DataFrame( + { + "timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")], + "a": ["a"], + "b": ["b"], + "c": ["c"], + }, + columns=["timestamp", "a", "b", "c"], + ) + result = df.set_index(["a", "b"]).unstack() + expected = DataFrame( + [[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]], + index=Index(["a"], name="a"), + columns=MultiIndex( + levels=[["timestamp", "c"], ["b"]], + codes=[[0, 1], [0, 0]], + names=[None, "b"], + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_stack_timezone_aware_values(future_stack): + # GH 19420 + ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York") + df = DataFrame({"A": ts}, index=["a", "b", "c"]) + result = df.stack(future_stack=future_stack) + expected = Series( + ts, + index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated") +@pytest.mark.parametrize("dropna", [True, False, lib.no_default]) +def test_stack_empty_frame(dropna, future_stack): + # GH 36113 + levels = [np.array([], dtype=np.int64), np.array([], dtype=np.int64)] + expected = Series(dtype=np.float64, index=MultiIndex(levels=levels, codes=[[], []])) + if future_stack and dropna is not lib.no_default: + with pytest.raises(ValueError, match="dropna must be unspecified"): + DataFrame(dtype=np.float64).stack(dropna=dropna, future_stack=future_stack) + else: + result = DataFrame(dtype=np.float64).stack( + dropna=dropna, future_stack=future_stack + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated") +@pytest.mark.parametrize("dropna", [True, False, lib.no_default]) +@pytest.mark.parametrize("fill_value", [None, 0]) +def test_stack_unstack_empty_frame(dropna, fill_value, future_stack): + # GH 36113 + if future_stack and dropna is not lib.no_default: + with pytest.raises(ValueError, match="dropna must be unspecified"): + DataFrame(dtype=np.int64).stack( + dropna=dropna, future_stack=future_stack + ).unstack(fill_value=fill_value) + else: + result = ( + DataFrame(dtype=np.int64) + .stack(dropna=dropna, future_stack=future_stack) + 
.unstack(fill_value=fill_value) + ) + expected = DataFrame(dtype=np.int64) + tm.assert_frame_equal(result, expected) + + +def test_unstack_single_index_series(): + # GH 36113 + msg = r"index must be a MultiIndex to unstack.*" + with pytest.raises(ValueError, match=msg): + Series(dtype=np.int64).unstack() + + +def test_unstacking_multi_index_df(): + # see gh-30740 + df = DataFrame( + { + "name": ["Alice", "Bob"], + "score": [9.5, 8], + "employed": [False, True], + "kids": [0, 0], + "gender": ["female", "male"], + } + ) + df = df.set_index(["name", "employed", "kids", "gender"]) + df = df.unstack(["gender"], fill_value=0) + expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0) + result = df.unstack(["employed", "kids"], fill_value=0) + expected = DataFrame( + [[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]], + index=Index(["Alice", "Bob"], name="name"), + columns=MultiIndex.from_tuples( + [ + ("score", "female", False, 0), + ("score", "female", True, 0), + ("score", "male", False, 0), + ("score", "male", True, 0), + ], + names=[None, "gender", "employed", "kids"], + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated") +def test_stack_positional_level_duplicate_column_names(future_stack): + # https://github.com/pandas-dev/pandas/issues/36353 + columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) + df = DataFrame([[1, 1, 1, 1]], columns=columns) + result = df.stack(0, future_stack=future_stack) + + new_columns = Index(["y", "z"], name="a") + new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) + expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) + + tm.assert_frame_equal(result, expected) + + +def test_unstack_non_slice_like_blocks(using_array_manager): + # Case where the mgr_locs of a DataFrame's underlying blocks are not slice-like + + mi = MultiIndex.from_product([range(5), ["A", "B", "C"]]) + df = DataFrame( + { + 0: np.random.default_rng(2).standard_normal(15), + 1: np.random.default_rng(2).standard_normal(15).astype(np.int64), + 2: np.random.default_rng(2).standard_normal(15), + 3: np.random.default_rng(2).standard_normal(15), + }, + index=mi, + ) + if not using_array_manager: + assert any(not x.mgr_locs.is_slice_like for x in df._mgr.blocks) + + res = df.unstack() + + expected = pd.concat([df[n].unstack() for n in range(4)], keys=range(4), axis=1) + tm.assert_frame_equal(res, expected) + + +@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated") +def test_stack_sort_false(future_stack): + # GH 15105 + data = [[1, 2, 3.0, 4.0], [2, 3, 4.0, 5.0], [3, 4, np.nan, np.nan]] + df = DataFrame( + data, + columns=MultiIndex( + levels=[["B", "A"], ["x", "y"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] + ), + ) + kwargs = {} if future_stack else {"sort": False} + result = df.stack(level=0, future_stack=future_stack, **kwargs) + if future_stack: + expected = DataFrame( + { + "x": [1.0, 3.0, 2.0, 4.0, 3.0, np.nan], + "y": [2.0, 4.0, 3.0, 5.0, 4.0, np.nan], + }, + index=MultiIndex.from_arrays( + [[0, 0, 1, 1, 2, 2], ["B", "A", "B", "A", "B", "A"]] + ), + ) + else: + expected = DataFrame( + {"x": [1.0, 3.0, 2.0, 4.0, 3.0], "y": [2.0, 4.0, 3.0, 5.0, 4.0]}, + index=MultiIndex.from_arrays([[0, 0, 1, 1, 2], ["B", "A", "B", "A", "B"]]), + ) + tm.assert_frame_equal(result, expected) + + # Codes sorted in this call + df = DataFrame( + data, + columns=MultiIndex.from_arrays([["B", "B", "A", "A"], ["x", 
"y", "x", "y"]]), + ) + kwargs = {} if future_stack else {"sort": False} + result = df.stack(level=0, future_stack=future_stack, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated") +def test_stack_sort_false_multi_level(future_stack): + # GH 15105 + idx = MultiIndex.from_tuples([("weight", "kg"), ("height", "m")]) + df = DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=idx) + kwargs = {} if future_stack else {"sort": False} + result = df.stack([0, 1], future_stack=future_stack, **kwargs) + expected_index = MultiIndex.from_tuples( + [ + ("cat", "weight", "kg"), + ("cat", "height", "m"), + ("dog", "weight", "kg"), + ("dog", "height", "m"), + ] + ) + expected = Series([1.0, 2.0, 3.0, 4.0], index=expected_index) + tm.assert_series_equal(result, expected) + + +class TestStackUnstackMultiLevel: + def test_unstack(self, multiindex_year_month_day_dataframe_random_data): + # just check that it works for now + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack() + unstacked.unstack() + + # test that ints work + ymd.astype(int).unstack() + + # test that int32 work + ymd.astype(np.int32).unstack() + + @pytest.mark.parametrize( + "result_rows,result_columns,index_product,expected_row", + [ + ( + [[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]], + ["ix1", "ix2", "col1", "col2", "col3", "col4"], + 2, + [None, None, 30.0, None], + ), + ( + [[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + 2, + [None, None, 30.0], + ), + ( + [[1, 1, None, None, 30.0], [2, None, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + None, + [None, None, 30.0], + ), + ], + ) + def test_unstack_partial( + self, result_rows, result_columns, index_product, expected_row + ): + # check for regressions on this issue: + # https://github.com/pandas-dev/pandas/issues/19351 + # make sure DataFrame.unstack() works when its run on a subset of the DataFrame + # and the Index levels contain values that are not present in the subset + result = DataFrame(result_rows, columns=result_columns).set_index( + ["ix1", "ix2"] + ) + result = result.iloc[1:2].unstack("ix2") + expected = DataFrame( + [expected_row], + columns=MultiIndex.from_product( + [result_columns[2:], [index_product]], names=[None, "ix2"] + ), + index=Index([2], name="ix1"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_multiple_no_empty_columns(self): + index = MultiIndex.from_tuples( + [(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)] + ) + + s = Series(np.random.default_rng(2).standard_normal(4), index=index) + + unstacked = s.unstack([1, 2]) + expected = unstacked.dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack(self, multiindex_year_month_day_dataframe_random_data, future_stack): + ymd = multiindex_year_month_day_dataframe_random_data + + # regular roundtrip + unstacked = ymd.unstack() + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked, ymd) + + unlexsorted = ymd.sort_index(level=2) + + unstacked = unlexsorted.unstack(2) + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to 
restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted[::-1] + unstacked = unlexsorted.unstack(1) + restacked = unstacked.stack(future_stack=future_stack).swaplevel(1, 2) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted.swaplevel(0, 1) + unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1) + restacked = unstacked.stack(0, future_stack=future_stack).swaplevel(1, 2) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + # columns unsorted + unstacked = ymd.unstack() + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked, ymd) + + # more than 2 levels in the columns + unstacked = ymd.unstack(1).unstack(1) + + result = unstacked.stack(1, future_stack=future_stack) + expected = ymd.unstack() + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(2, future_stack=future_stack) + expected = ymd.unstack(1) + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(0, future_stack=future_stack) + expected = ymd.stack(future_stack=future_stack).unstack(1).unstack(1) + tm.assert_frame_equal(result, expected) + + # not all levels present in each echelon + unstacked = ymd.unstack(2).loc[:, ::3] + stacked = unstacked.stack(future_stack=future_stack).stack( + future_stack=future_stack + ) + ymd_stacked = ymd.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + stacked = stacked.dropna(how="all") + ymd_stacked = ymd_stacked.dropna(how="all") + tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index)) + + # stack with negative number + result = ymd.unstack(0).stack(-2, future_stack=future_stack) + expected = ymd.unstack(0).stack(0, future_stack=future_stack) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "idx, columns, exp_idx", + [ + [ + list("abab"), + ["1st", "2nd", "1st"], + MultiIndex( + levels=[["a", "b"], ["1st", "2nd"]], + codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)], + ), + ], + [ + MultiIndex.from_tuples((("a", 2), ("b", 1), ("a", 1), ("b", 2))), + ["1st", "2nd", "1st"], + MultiIndex( + levels=[["a", "b"], [1, 2], ["1st", "2nd"]], + codes=[ + np.tile(np.arange(2).repeat(3), 2), + np.repeat([1, 0, 1], [3, 6, 3]), + np.tile([0, 1, 0], 4), + ], + ), + ], + ], + ) + def test_stack_duplicate_index(self, idx, columns, exp_idx, future_stack): + # GH10417 + df = DataFrame( + np.arange(12).reshape(4, 3), + index=idx, + columns=columns, + ) + if future_stack: + msg = "Columns with duplicate values are not supported in stack" + with pytest.raises(ValueError, match=msg): + df.stack(future_stack=future_stack) + else: + result = df.stack(future_stack=future_stack) + expected = Series(np.arange(12), index=exp_idx) + tm.assert_series_equal(result, expected) + assert result.index.is_unique is False + li, ri = result.index, expected.index + tm.assert_index_equal(li, ri) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_unstack_odd_failure(self, future_stack): + mi = 
MultiIndex.from_arrays( + [ + ["Fri"] * 4 + ["Sat"] * 2 + ["Sun"] * 2 + ["Thu"] * 3, + ["Dinner"] * 2 + ["Lunch"] * 2 + ["Dinner"] * 5 + ["Lunch"] * 2, + ["No", "Yes"] * 4 + ["No", "No", "Yes"], + ], + names=["day", "time", "smoker"], + ) + df = DataFrame( + { + "sum": np.arange(11, dtype="float64"), + "len": np.arange(11, dtype="float64"), + }, + index=mi, + ) + # it works, #2100 + result = df.unstack(2) + + recons = result.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + recons = recons.dropna(how="all") + tm.assert_frame_equal(recons, df) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_mixed_dtype(self, multiindex_dataframe_random_data, future_stack): + frame = multiindex_dataframe_random_data + + df = frame.T + df["foo", "four"] = "foo" + df = df.sort_index(level=1, axis=1) + + stacked = df.stack(future_stack=future_stack) + result = df["foo"].stack(future_stack=future_stack).sort_index() + tm.assert_series_equal(stacked["foo"], result, check_names=False) + assert result.name is None + assert stacked["bar"].dtype == np.float64 + + def test_unstack_bug(self, future_stack): + df = DataFrame( + { + "state": ["naive", "naive", "naive", "active", "active", "active"], + "exp": ["a", "b", "b", "b", "a", "a"], + "barcode": [1, 2, 3, 4, 1, 3], + "v": ["hi", "hi", "bye", "bye", "bye", "peace"], + "extra": np.arange(6.0), + } + ) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) + + unstacked = result.unstack() + restacked = unstacked.stack(future_stack=future_stack) + tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float)) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_unstack_preserve_names( + self, multiindex_dataframe_random_data, future_stack + ): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack() + assert unstacked.index.name == "first" + assert unstacked.columns.names == ["exp", "second"] + + restacked = unstacked.stack(future_stack=future_stack) + assert restacked.index.names == frame.index.names + + @pytest.mark.parametrize("method", ["stack", "unstack"]) + def test_stack_unstack_wrong_level_name( + self, method, multiindex_dataframe_random_data, future_stack + ): + # GH 18303 - wrong level name should raise + frame = multiindex_dataframe_random_data + + # A DataFrame with flat axes: + df = frame.loc["foo"] + + kwargs = {"future_stack": future_stack} if method == "stack" else {} + with pytest.raises(KeyError, match="does not match index name"): + getattr(df, method)("mistake", **kwargs) + + if method == "unstack": + # Same on a Series: + s = df.iloc[:, 0] + with pytest.raises(KeyError, match="does not match index name"): + getattr(s, method)("mistake", **kwargs) + + def test_unstack_level_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.unstack("second") + expected = frame.unstack(level=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_level_name(self, multiindex_dataframe_random_data, future_stack): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack("second") + result = unstacked.stack("exp", 
future_stack=future_stack) + expected = frame.unstack().stack(0, future_stack=future_stack) + tm.assert_frame_equal(result, expected) + + result = frame.stack("exp", future_stack=future_stack) + expected = frame.stack(future_stack=future_stack) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_unstack_multiple( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + expected = ymd.unstack("year").unstack("month") + tm.assert_frame_equal(unstacked, expected) + assert unstacked.columns.names == expected.columns.names + + # series + s = ymd["A"] + s_unstacked = s.unstack(["year", "month"]) + tm.assert_frame_equal(s_unstacked, expected["A"]) + + restacked = unstacked.stack(["year", "month"], future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + restacked = restacked.swaplevel(0, 1).swaplevel(1, 2) + restacked = restacked.sort_index(level=0) + + tm.assert_frame_equal(restacked, ymd) + assert restacked.index.names == ymd.index.names + + # GH #451 + unstacked = ymd.unstack([1, 2]) + expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + unstacked = ymd.unstack([2, 1]) + expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns]) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_names_and_numbers( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + # Can't use mixture of names and numbers to stack + with pytest.raises(ValueError, match="level should contain"): + unstacked.stack([0, "month"], future_stack=future_stack) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_multiple_out_of_bounds( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + # nlevels == 3 + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + with pytest.raises(IndexError, match="Too many levels"): + unstacked.stack([2, 3], future_stack=future_stack) + with pytest.raises(IndexError, match="not a valid level number"): + unstacked.stack([-4, -3], future_stack=future_stack) + + def test_unstack_period_series(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + name="period", + ) + idx2 = Index(["A", "B"] * 3, name="str") + value = [1, 2, 3, 4, 5, 6] + + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period" + ) + expected = DataFrame( + {"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"] + ) + expected.columns.name = "str" + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + 
name="period1", + ) + + idx2 = pd.PeriodIndex( + ["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"], + freq="M", + name="period2", + ) + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period1" + ) + e_cols = pd.PeriodIndex( + ["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"], + freq="M", + name="period2", + ) + expected = DataFrame( + [ + [np.nan, np.nan, np.nan, np.nan, 2, 1], + [np.nan, np.nan, 4, 3, np.nan, np.nan], + [6, 5, np.nan, np.nan, np.nan, np.nan], + ], + index=e_idx, + columns=e_cols, + ) + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + def test_unstack_period_frame(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"], + freq="M", + name="period1", + ) + idx2 = pd.PeriodIndex( + ["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"], + freq="M", + name="period2", + ) + value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]} + idx = MultiIndex.from_arrays([idx1, idx2]) + df = DataFrame(value, index=idx) + + result1 = df.unstack() + result2 = df.unstack(level=1) + result3 = df.unstack(level=0) + + e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1") + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"], + freq="M", + name="period2", + ) + e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2]) + expected = DataFrame( + [[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols + ) + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + + e_1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1" + ) + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02"], freq="M", name="period2" + ) + e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1]) + expected = DataFrame( + [[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols + ) + + tm.assert_frame_equal(result3, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_multiple_bug(self, future_stack): + # bug when some uniques are not present in the data GH#3170 + id_col = ([1] * 3) + ([2] * 3) + name = (["a"] * 3) + (["b"] * 3) + date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2) + var1 = np.random.default_rng(2).integers(0, 100, 6) + df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1}) + + multi = df.set_index(["DATE", "ID"]) + multi.columns.name = "Params" + unst = multi.unstack("ID") + msg = re.escape("agg function failed [how->mean,dtype->") + with pytest.raises(TypeError, match=msg): + unst.resample("W-THU").mean() + down = unst.resample("W-THU").mean(numeric_only=True) + rs = down.stack("ID", future_stack=future_stack) + xp = ( + unst.loc[:, ["VAR1"]] + .resample("W-THU") + .mean() + .stack("ID", future_stack=future_stack) + ) + xp.columns.name = "Params" + tm.assert_frame_equal(rs, xp) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_dropna(self, future_stack): + # GH#3997 + df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]}) + df = df.set_index(["A", "B"]) + + dropna = False if not 
future_stack else lib.no_default + stacked = df.unstack().stack(dropna=dropna, future_stack=future_stack) + assert len(stacked) > len(stacked.dropna()) + + if future_stack: + with pytest.raises(ValueError, match="dropna must be unspecified"): + df.unstack().stack(dropna=True, future_stack=future_stack) + else: + stacked = df.unstack().stack(dropna=True, future_stack=future_stack) + tm.assert_frame_equal(stacked, stacked.dropna()) + + def test_unstack_multiple_hierarchical(self, future_stack): + df = DataFrame( + index=[ + [0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 0, 0, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + ], + columns=[[0, 0, 1, 1], [0, 1, 0, 1]], + ) + + df.index.names = ["a", "b", "c"] + df.columns.names = ["d", "e"] + + # it works! + df.unstack(["b", "c"]) + + def test_unstack_sparse_keyspace(self): + # memory problems with naive impl GH#2278 + # Generate Long File & Test Pivot + NUM_ROWS = 1000 + + df = DataFrame( + { + "A": np.random.default_rng(2).integers(100, size=NUM_ROWS), + "B": np.random.default_rng(3).integers(300, size=NUM_ROWS), + "C": np.random.default_rng(4).integers(-7, 7, size=NUM_ROWS), + "D": np.random.default_rng(5).integers(-19, 19, size=NUM_ROWS), + "E": np.random.default_rng(6).integers(3000, size=NUM_ROWS), + "F": np.random.default_rng(7).standard_normal(NUM_ROWS), + } + ) + + idf = df.set_index(["A", "B", "C", "D", "E"]) + + # it works! is sufficient + idf.unstack("E") + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_unstack_unobserved_keys(self, future_stack): + # related to GH#2278 refactoring + levels = [[0, 1], [0, 1, 2, 3]] + codes = [[0, 0, 1, 1], [0, 2, 0, 2]] + + index = MultiIndex(levels, codes) + + df = DataFrame(np.random.default_rng(2).standard_normal((4, 2)), index=index) + + result = df.unstack() + assert len(result.columns) == 4 + + recons = result.stack(future_stack=future_stack) + tm.assert_frame_equal(recons, df) + + @pytest.mark.slow + def test_unstack_number_of_levels_larger_than_int32(self, monkeypatch): + # GH#20601 + # GH 26314: Change ValueError to PerformanceWarning + + class MockUnstacker(reshape_lib._Unstacker): + def __init__(self, *args, **kwargs) -> None: + # __init__ will raise the warning + super().__init__(*args, **kwargs) + raise Exception("Don't compute final result.") + + with monkeypatch.context() as m: + m.setattr(reshape_lib, "_Unstacker", MockUnstacker) + df = DataFrame( + np.zeros((2**16, 2)), + index=[np.arange(2**16), np.arange(2**16)], + ) + msg = "The following operation may generate" + with tm.assert_produces_warning(PerformanceWarning, match=msg): + with pytest.raises(Exception, match="Don't compute final result."): + df.unstack() + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize( + "levels", + itertools.chain.from_iterable( + itertools.product(itertools.permutations([0, 1, 2], width), repeat=2) + for width in [2, 3] + ), + ) + @pytest.mark.parametrize("stack_lev", range(2)) + @pytest.mark.parametrize("sort", [True, False]) + def test_stack_order_with_unsorted_levels( + self, levels, stack_lev, sort, future_stack + ): + # GH#16323 + # deep check for 1-row case + columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + df = DataFrame(columns=columns, data=[range(4)]) + kwargs = {} if future_stack else {"sort": sort} + df_stacked = df.stack(stack_lev, future_stack=future_stack, **kwargs) + for row in df.index: + for col in df.columns: + expected = df.loc[row, col] + 
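# After stacking level stack_lev, that level's label joins the row key, and + # the remaining column level becomes the column key. +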
result_row = row, col[stack_lev] + result_col = col[1 - stack_lev] + result = df_stacked.loc[result_row, result_col] + assert result == expected + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_order_with_unsorted_levels_multi_row(self, future_stack): + # GH#16323 + + # check multi-row case + mi = MultiIndex( + levels=[["A", "C", "B"], ["B", "A", "C"]], + codes=[np.repeat(range(3), 3), np.tile(range(3), 3)], + ) + df = DataFrame( + columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1) + ) + assert all( + df.loc[row, col] + == df.stack(0, future_stack=future_stack).loc[(row, col[0]), col[1]] + for row in df.index + for col in df.columns + ) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_order_with_unsorted_levels_multi_row_2(self, future_stack): + # GH#53636 + levels = ((0, 1), (1, 0)) + stack_lev = 1 + columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + df = DataFrame(columns=columns, data=[range(4)], index=[1, 0, 2, 3]) + kwargs = {} if future_stack else {"sort": True} + result = df.stack(stack_lev, future_stack=future_stack, **kwargs) + expected_index = MultiIndex( + levels=[[0, 1, 2, 3], [0, 1]], + codes=[[1, 1, 0, 0, 2, 2, 3, 3], [1, 0, 1, 0, 1, 0, 1, 0]], + ) + expected = DataFrame( + { + 0: [0, 1, 0, 1, 0, 1, 0, 1], + 1: [2, 3, 2, 3, 2, 3, 2, 3], + }, + index=expected_index, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_unstack_unordered_multiindex(self, future_stack): + # GH# 18265 + values = np.arange(5) + data = np.vstack( + [ + [f"b{x}" for x in values], # b0, b1, .. + [f"a{x}" for x in values], # a0, a1, .. 
+ ] + ) + df = DataFrame(data.T, columns=["b", "a"]) + df.columns.name = "first" + second_level_dict = {"x": df} + multi_level_df = pd.concat(second_level_dict, axis=1) + multi_level_df.columns.names = ["second", "first"] + df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1) + result = df.stack(["first", "second"], future_stack=future_stack).unstack( + ["first", "second"] + ) + expected = DataFrame( + [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]], + index=[0, 1, 2, 3, 4], + columns=MultiIndex.from_tuples( + [("a", "x"), ("b", "x")], names=["first", "second"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_preserve_types( + self, multiindex_year_month_day_dataframe_random_data, using_infer_string + ): + # GH#403 + ymd = multiindex_year_month_day_dataframe_random_data + ymd["E"] = "foo" + ymd["F"] = 2 + + unstacked = ymd.unstack("month") + assert unstacked["A", 1].dtype == np.float64 + assert ( + unstacked["E", 1].dtype == np.object_ + if not using_infer_string + else "string" + ) + assert unstacked["F", 1].dtype == np.float64 + + def test_unstack_group_index_overflow(self, future_stack): + codes = np.tile(np.arange(500), 2) + level = np.arange(500) + + index = MultiIndex( + levels=[level] * 8 + [[0, 1]], + codes=[codes] * 8 + [np.arange(2).repeat(500)], + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack() + assert result.shape == (500, 2) + + # test roundtrip + stacked = result.stack(future_stack=future_stack) + tm.assert_series_equal(s, stacked.reindex(s.index)) + + # put it at beginning + index = MultiIndex( + levels=[[0, 1]] + [level] * 8, + codes=[np.arange(2).repeat(500)] + [codes] * 8, + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(0) + assert result.shape == (500, 2) + + # put it in middle + index = MultiIndex( + levels=[level] * 4 + [[0, 1]] + [level] * 4, + codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4), + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(4) + assert result.shape == (500, 2) + + def test_unstack_with_missing_int_cast_to_float(self, using_array_manager): + # https://github.com/pandas-dev/pandas/issues/37115 + df = DataFrame( + { + "a": ["A", "A", "B"], + "b": ["ca", "cb", "cb"], + "v": [10] * 3, + } + ).set_index(["a", "b"]) + + # add another int column to get 2 blocks + df["is_"] = 1 + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + result = df.unstack("b") + result[("is_", "ca")] = result[("is_", "ca")].fillna(0) + + expected = DataFrame( + [[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]], + index=Index(["A", "B"], name="a"), + columns=MultiIndex.from_tuples( + [("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")], + names=[None, "b"], + ), + ) + if using_array_manager: + # INFO(ArrayManager) with ArrayManager preserve dtype where possible + expected[("v", "cb")] = expected[("v", "cb")].astype("int64") + expected[("is_", "cb")] = expected[("is_", "cb")].astype("int64") + tm.assert_frame_equal(result, expected) + + def test_unstack_with_level_has_nan(self): + # GH 37510 + df1 = DataFrame( + { + "L1": [1, 2, 3, 4], + "L2": [3, 4, 1, 2], + "L3": [1, 1, 1, 1], + "x": [1, 2, 3, 4], + } + ) + df1 = df1.set_index(["L1", "L2", "L3"]) + new_levels = ["n1", "n2", "n3", None] + df1.index = df1.index.set_levels(levels=new_levels, level="L1") + df1.index = df1.index.set_levels(levels=new_levels, level="L2") + + result = df1.unstack("L3")[("x", 1)].sort_index().index + expected = MultiIndex( + levels=[["n1", "n2", 
"n3", None], ["n1", "n2", "n3", None]], + codes=[[0, 1, 2, 3], [2, 3, 0, 1]], + names=["L1", "L2"], + ) + + tm.assert_index_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_nan_in_multiindex_columns(self, future_stack): + # GH#39481 + df = DataFrame( + np.zeros([1, 5]), + columns=MultiIndex.from_tuples( + [ + (0, None, None), + (0, 2, 0), + (0, 2, 1), + (0, 3, 0), + (0, 3, 1), + ], + ), + ) + result = df.stack(2, future_stack=future_stack) + if future_stack: + index = MultiIndex(levels=[[0], [0.0, 1.0]], codes=[[0, 0, 0], [-1, 0, 1]]) + columns = MultiIndex(levels=[[0], [2, 3]], codes=[[0, 0, 0], [-1, 0, 1]]) + else: + index = Index([(0, None), (0, 0), (0, 1)]) + columns = Index([(0, None), (0, 2), (0, 3)]) + expected = DataFrame( + [[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]], + index=index, + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_multi_level_stack_categorical(self, future_stack): + # GH 15239 + midx = MultiIndex.from_arrays( + [ + ["A"] * 2 + ["B"] * 2, + pd.Categorical(list("abab")), + pd.Categorical(list("ccdd")), + ] + ) + df = DataFrame(np.arange(8).reshape(2, 4), columns=midx) + result = df.stack([1, 2], future_stack=future_stack) + if future_stack: + expected = DataFrame( + [ + [0, np.nan], + [1, np.nan], + [np.nan, 2], + [np.nan, 3], + [4, np.nan], + [5, np.nan], + [np.nan, 6], + [np.nan, 7], + ], + columns=["A", "B"], + index=MultiIndex.from_arrays( + [ + [0] * 4 + [1] * 4, + pd.Categorical(list("abababab")), + pd.Categorical(list("ccddccdd")), + ] + ), + ) + else: + expected = DataFrame( + [ + [0, np.nan], + [np.nan, 2], + [1, np.nan], + [np.nan, 3], + [4, np.nan], + [np.nan, 6], + [5, np.nan], + [np.nan, 7], + ], + columns=["A", "B"], + index=MultiIndex.from_arrays( + [ + [0] * 4 + [1] * 4, + pd.Categorical(list("aabbaabb")), + pd.Categorical(list("cdcdcdcd")), + ] + ), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_nan_level(self, future_stack): + # GH 9406 + df_nan = DataFrame( + np.arange(4).reshape(2, 2), + columns=MultiIndex.from_tuples( + [("A", np.nan), ("B", "b")], names=["Upper", "Lower"] + ), + index=Index([0, 1], name="Num"), + dtype=np.float64, + ) + result = df_nan.stack(future_stack=future_stack) + if future_stack: + index = MultiIndex( + levels=[[0, 1], [np.nan, "b"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["Num", "Lower"], + ) + else: + index = MultiIndex.from_tuples( + [(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"] + ) + expected = DataFrame( + [[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]], + columns=Index(["A", "B"], name="Upper"), + index=index, + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_categorical_columns(self): + # GH 14018 + idx = MultiIndex.from_product([["A"], [0, 1]]) + df = DataFrame({"cat": pd.Categorical(["a", "b"])}, index=idx) + result = df.unstack() + expected = DataFrame( + { + 0: pd.Categorical(["a"], categories=["a", "b"]), + 1: pd.Categorical(["b"], categories=["a", "b"]), + }, + index=["A"], + ) + expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def 
test_stack_unsorted(self, future_stack): + # GH 16925 + PAE = ["ITA", "FRA"] + VAR = ["A1", "A2"] + TYP = ["CRT", "DBT", "NET"] + MI = MultiIndex.from_product([PAE, VAR, TYP], names=["PAE", "VAR", "TYP"]) + + V = list(range(len(MI))) + DF = DataFrame(data=V, index=MI, columns=["VALUE"]) + + DF = DF.unstack(["VAR", "TYP"]) + DF.columns = DF.columns.droplevel(0) + DF.loc[:, ("A0", "NET")] = 9999 + + result = DF.stack(["VAR", "TYP"], future_stack=future_stack).sort_index() + expected = ( + DF.sort_index(axis=1) + .stack(["VAR", "TYP"], future_stack=future_stack) + .sort_index() + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + def test_stack_nullable_dtype(self, future_stack): + # GH#43561 + columns = MultiIndex.from_product( + [["54511", "54515"], ["r", "t_mean"]], names=["station", "element"] + ) + index = Index([1, 2, 3], name="time") + + arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]]) + df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype()) + + result = df.stack("station", future_stack=future_stack) + + expected = ( + df.astype(np.int64) + .stack("station", future_stack=future_stack) + .astype(pd.Int64Dtype()) + ) + tm.assert_frame_equal(result, expected) + + # non-homogeneous case + df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype()) + result = df.stack("station", future_stack=future_stack) + + expected = DataFrame( + { + "r": pd.array( + [50.0, 10.0, 10.0, 9.0, 305.0, 111.0], dtype=pd.Float64Dtype() + ), + "t_mean": pd.array( + [226, 215, 215, 220, 232, 220], dtype=pd.Int64Dtype() + ), + }, + index=MultiIndex.from_product([index, columns.levels[0]]), + ) + expected.columns.name = "element" + tm.assert_frame_equal(result, expected) + + def test_unstack_mixed_level_names(self): + # GH#48763 + arrays = [["a", "a"], [1, 2], ["red", "blue"]] + idx = MultiIndex.from_arrays(arrays, names=("x", 0, "y")) + df = DataFrame({"m": [1, 2]}, index=idx) + result = df.unstack("x") + expected = DataFrame( + [[1], [2]], + columns=MultiIndex.from_tuples([("m", "a")], names=[None, "x"]), + index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_stack_tuple_columns(future_stack): + # GH#54948 - test stack when the input has a non-MultiIndex with tuples + df = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[("a", 1), ("a", 2), ("b", 1)] + ) + result = df.stack(future_stack=future_stack) + expected = Series( + [1, 2, 3, 4, 5, 6, 7, 8, 9], + index=MultiIndex( + levels=[[0, 1, 2], [("a", 1), ("a", 2), ("b", 1)]], + codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], + ), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype, na_value", + [ + ("float64", np.nan), + ("Float64", np.nan), + ("Float64", pd.NA), + ("Int64", pd.NA), + ], +) +@pytest.mark.parametrize("test_multiindex", [True, False]) +def test_stack_preserves_na(dtype, na_value, test_multiindex): + # GH#56573 + if test_multiindex: + index = MultiIndex.from_arrays(2 * [Index([na_value], dtype=dtype)]) + else: + index = Index([na_value], dtype=dtype) + df = DataFrame({"a": [1]}, index=index) + result = df.stack(future_stack=True) + + if test_multiindex: + expected_index = MultiIndex.from_arrays( + [ + Index([na_value], dtype=dtype), + Index([na_value], dtype=dtype), + Index(["a"]), + ] + ) + else: + expected_index = MultiIndex.from_arrays( + [ + Index([na_value], 
dtype=dtype), + Index(["a"]), + ] + ) + expected = Series(1, index=expected_index) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_subclass.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..855b58229cbdb5819e83e3abe39a938bbb8658eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_subclass.py @@ -0,0 +1,825 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) + + +@pytest.fixture() +def gpd_style_subclass_df(): + class SubclassedDataFrame(DataFrame): + @property + def _constructor(self): + return SubclassedDataFrame + + return SubclassedDataFrame({"a": [1, 2, 3]}) + + +class TestDataFrameSubclassing: + def test_no_warning_on_mgr(self): + # GH#57032 + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] + ) + with tm.assert_produces_warning(None): + # df.isna() goes through _constructor_from_mgr, which we want to + # *not* pass a Manager do __init__ + df.isna() + df["X"].isna() + + def test_frame_subclassing_and_slicing(self): + # Subclass frame and ensure it returns the right class on slicing it + # In reference to PR 9632 + + class CustomSeries(Series): + @property + def _constructor(self): + return CustomSeries + + def custom_series_function(self): + return "OK" + + class CustomDataFrame(DataFrame): + """ + Subclasses pandas DF, fills DF with simulation results, adds some + custom plotting functions. + """ + + def __init__(self, *args, **kw) -> None: + super().__init__(*args, **kw) + + @property + def _constructor(self): + return CustomDataFrame + + _constructor_sliced = CustomSeries + + def custom_frame_function(self): + return "OK" + + data = {"col1": range(10), "col2": range(10)} + cdf = CustomDataFrame(data) + + # Did we get back our own DF class? + assert isinstance(cdf, CustomDataFrame) + + # Do we get back our own Series class after selecting a column? + cdf_series = cdf.col1 + assert isinstance(cdf_series, CustomSeries) + assert cdf_series.custom_series_function() == "OK" + + # Do we get back our own DF class after slicing row-wise? 
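+        # (row slices go through _constructor, so the custom class should survive)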
+ cdf_rows = cdf[1:5] + assert isinstance(cdf_rows, CustomDataFrame) + assert cdf_rows.custom_frame_function() == "OK" + + # Make sure sliced part of multi-index frame is custom class + mcol = MultiIndex.from_tuples([("A", "A"), ("A", "B")]) + cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + assert isinstance(cdf_multi["A"], CustomDataFrame) + + mcol = MultiIndex.from_tuples([("A", ""), ("B", "")]) + cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + assert isinstance(cdf_multi2["A"], CustomSeries) + + def test_dataframe_metadata(self): + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] + ) + df.testattr = "XXX" + + assert df.testattr == "XXX" + assert df[["X"]].testattr == "XXX" + assert df.loc[["a", "b"], :].testattr == "XXX" + assert df.iloc[[0, 1], :].testattr == "XXX" + + # see gh-9776 + assert df.iloc[0:1, :].testattr == "XXX" + + # see gh-10553 + unpickled = tm.round_trip_pickle(df) + tm.assert_frame_equal(df, unpickled) + assert df._metadata == unpickled._metadata + assert df.testattr == unpickled.testattr + + def test_indexing_sliced(self): + # GH 11559 + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"] + ) + res = df.loc[:, "X"] + exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.iloc[:, 1] + exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc[:, "Z"] + exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc["a", :] + exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.iloc[1, :] + exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc["c", :] + exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + def test_subclass_attr_err_propagation(self): + # GH 11808 + class A(DataFrame): + @property + def nonexistence(self): + return self.i_dont_exist + + with pytest.raises(AttributeError, match=".*i_dont_exist.*"): + A().nonexistence + + def test_subclass_align(self): + # GH 12983 + df1 = tm.SubclassedDataFrame( + {"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE") + ) + df2 = tm.SubclassedDataFrame( + {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD") + ) + + res1, res2 = df1.align(df2, axis=0) + exp1 = tm.SubclassedDataFrame( + {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, + index=list("ABCDE"), + ) + exp2 = tm.SubclassedDataFrame( + {"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]}, + index=list("ABCDE"), + ) + assert isinstance(res1, tm.SubclassedDataFrame) + tm.assert_frame_equal(res1, exp1) + assert isinstance(res2, tm.SubclassedDataFrame) + tm.assert_frame_equal(res2, exp2) + + res1, res2 = df1.a.align(df2.c) + assert isinstance(res1, tm.SubclassedSeries) + tm.assert_series_equal(res1, exp1.a) + assert isinstance(res2, tm.SubclassedSeries) + tm.assert_series_equal(res2, exp2.c) + + def test_subclass_align_combinations(self): + # GH 12983 + df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, 
index=list("ACE")) + s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x") + + # frame + series + res1, res2 = df.align(s, axis=0) + exp1 = tm.SubclassedDataFrame( + {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, + index=list("ABCDE"), + ) + # name is lost when + exp2 = tm.SubclassedSeries( + [1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x" + ) + + assert isinstance(res1, tm.SubclassedDataFrame) + tm.assert_frame_equal(res1, exp1) + assert isinstance(res2, tm.SubclassedSeries) + tm.assert_series_equal(res2, exp2) + + # series + frame + res1, res2 = s.align(df) + assert isinstance(res1, tm.SubclassedSeries) + tm.assert_series_equal(res1, exp2) + assert isinstance(res2, tm.SubclassedDataFrame) + tm.assert_frame_equal(res2, exp1) + + def test_subclass_iterrows(self): + # GH 13977 + df = tm.SubclassedDataFrame({"a": [1]}) + for i, row in df.iterrows(): + assert isinstance(row, tm.SubclassedSeries) + tm.assert_series_equal(row, df.loc[i]) + + def test_subclass_stack(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["a", "b", "c"], + columns=["X", "Y", "Z"], + ) + + res = df.stack(future_stack=True) + exp = tm.SubclassedSeries( + [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")] + ) + + tm.assert_series_equal(res, exp) + + def test_subclass_stack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 12], + [11, 13], + [20, 22], + [21, 23], + [30, 32], + [31, 33], + [40, 42], + [41, 43], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), + names=["aaa", "ccc", "yyy"], + ), + columns=Index(["W", "X"], name="www"), + ) + + res = df.stack(future_stack=True) + tm.assert_frame_equal(res, exp) + + res = df.stack("yyy", future_stack=True) + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10, 11], + [12, 13], + [20, 21], + [22, 23], + [30, 31], + [32, 33], + [40, 41], + [42, 43], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), + names=["aaa", "ccc", "www"], + ), + columns=Index(["y", "z"], name="yyy"), + ) + + res = df.stack("www", future_stack=True) + tm.assert_frame_equal(res, exp) + + def test_subclass_stack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 12.0], + [11, 13.0], + [20, 22.0], + [21, 23.0], + [30, 32.0], + [31, 33.0], + [40, 42.0], + [41, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), + names=["aaa", "ccc", "yyy"], + ), + columns=Index(["W", "X"], name="www"), + ) + + res = df.stack(future_stack=True) + tm.assert_frame_equal(res, exp) + + res = df.stack("yyy", future_stack=True) + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10.0, 11.0], + [12.0, 13.0], + [20.0, 21.0], + [22.0, 
23.0], + [30.0, 31.0], + [32.0, 33.0], + [40.0, 41.0], + [42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), + names=["aaa", "ccc", "www"], + ), + columns=Index(["y", "z"], name="yyy"), + ) + + res = df.stack("www", future_stack=True) + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["a", "b", "c"], + columns=["X", "Y", "Z"], + ) + + res = df.unstack() + exp = tm.SubclassedSeries( + [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")] + ) + + tm.assert_series_equal(res, exp) + + def test_subclass_unstack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]], + index=Index(["A", "B"], name="aaa"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))), + names=["www", "yyy", "ccc"], + ), + ) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack("ccc") + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]], + index=Index(["c", "d"], name="ccc"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))), + names=["www", "yyy", "aaa"], + ), + ) + + res = df.unstack("aaa") + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 20, 11, 21, 12.0, 22.0, 13.0, 23.0], + [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0], + ], + index=Index(["A", "B"], name="aaa"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))), + names=["www", "yyy", "ccc"], + ), + ) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack("ccc") + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0], + [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0], + ], + index=Index(["c", "d"], name="ccc"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))), + names=["www", "yyy", "aaa"], + ), + ) + + res = df.unstack("aaa") + tm.assert_frame_equal(res, exp) + + def test_subclass_pivot(self): + # GH 15564 + df = tm.SubclassedDataFrame( + { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + ) + + pivoted = df.pivot(index="index", columns="columns", values="values") + + expected = tm.SubclassedDataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + + tm.assert_frame_equal(pivoted, 
expected) + + def test_subclassed_melt(self): + # GH 15564 + cheese = tm.SubclassedDataFrame( + { + "first": ["John", "Mary"], + "last": ["Doe", "Bo"], + "height": [5.5, 6.0], + "weight": [130, 150], + } + ) + + melted = pd.melt(cheese, id_vars=["first", "last"]) + + expected = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 5.5], + ["Mary", "Bo", "height", 6.0], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + tm.assert_frame_equal(melted, expected) + + def test_subclassed_wide_to_long(self): + # GH 9762 + + x = np.random.default_rng(2).standard_normal(3) + df = tm.SubclassedDataFrame( + { + "A1970": {0: "a", 1: "b", 2: "c"}, + "A1980": {0: "d", 1: "e", 2: "f"}, + "B1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = tm.SubclassedDataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") + + tm.assert_frame_equal(long_frame, expected) + + def test_subclassed_apply(self): + # GH 19822 + + def check_row_subclass(row): + assert isinstance(row, tm.SubclassedSeries) + + def stretch(row): + if row["variable"] == "height": + row["value"] += 0.5 + return row + + df = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 5.5], + ["Mary", "Bo", "height", 6.0], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + df.apply(lambda x: check_row_subclass(x)) + df.apply(lambda x: check_row_subclass(x), axis=1) + + expected = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 6.0], + ["Mary", "Bo", "height", 6.5], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + result = df.apply(lambda x: stretch(x), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + + result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + + result = df.apply(lambda x: [1, 2, 3], axis=1) + assert not isinstance(result, tm.SubclassedDataFrame) + tm.assert_series_equal(result, expected) + + def test_subclassed_reductions(self, all_reductions): + # GH 25596 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = getattr(df, all_reductions)() + assert isinstance(result, tm.SubclassedSeries) + + def test_subclassed_count(self): + df = tm.SubclassedDataFrame( + { + "Person": ["John", "Myla", "Lewis", "John", "Myla"], + "Age": [24.0, np.nan, 21.0, 33, 26], + "Single": [False, True, True, True, False], + } + ) + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame({"A": [1, 0, 3], "B": [0, 5, 6], "C": [7, 8, 0]}) + result = df.count() + assert 
isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame() + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + def test_isin(self): + df = tm.SubclassedDataFrame( + {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"] + ) + result = df.isin([0, 2]) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_duplicated(self): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.duplicated() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame() + result = df.duplicated() + assert isinstance(result, tm.SubclassedSeries) + + @pytest.mark.parametrize("idx_method", ["idxmax", "idxmin"]) + def test_idx(self, idx_method): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = getattr(df, idx_method)() + assert isinstance(result, tm.SubclassedSeries) + + def test_dot(self): + df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + s = tm.SubclassedSeries([1, 1, 2, 1]) + result = df.dot(s) + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + s = tm.SubclassedDataFrame([1, 1, 2, 1]) + result = df.dot(s) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_memory_usage(self): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.memory_usage() + assert isinstance(result, tm.SubclassedSeries) + + result = df.memory_usage(index=False) + assert isinstance(result, tm.SubclassedSeries) + + def test_corrwith(self): + pytest.importorskip("scipy") + index = ["a", "b", "c", "d", "e"] + columns = ["one", "two", "three", "four"] + df1 = tm.SubclassedDataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + index=index, + columns=columns, + ) + df2 = tm.SubclassedDataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=index[:4], + columns=columns, + ) + correls = df1.corrwith(df2, axis=1, drop=True, method="kendall") + + assert isinstance(correls, (tm.SubclassedSeries)) + + def test_asof(self): + N = 3 + rng = pd.date_range("1/1/1990", periods=N, freq="53s") + df = tm.SubclassedDataFrame( + { + "A": [np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, np.nan], + "C": [np.nan, np.nan, np.nan], + }, + index=rng, + ) + + result = df.asof(rng[-2:]) + assert isinstance(result, tm.SubclassedDataFrame) + + result = df.asof(rng[-2]) + assert isinstance(result, tm.SubclassedSeries) + + result = df.asof("1989-12-31") + assert isinstance(result, tm.SubclassedSeries) + + def test_idxmin_preserves_subclass(self): + # GH 28330 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.idxmin() + assert isinstance(result, tm.SubclassedSeries) + + def test_idxmax_preserves_subclass(self): + # GH 28330 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.idxmax() + assert isinstance(result, tm.SubclassedSeries) + + def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df): + # GH 43668 + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) 
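+        # convert_dtypes rebuilds the result via _constructor, so the subclass should be preserved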
+ result = df.convert_dtypes() + assert isinstance(result, tm.SubclassedDataFrame) + + result = gpd_style_subclass_df.convert_dtypes() + assert isinstance(result, type(gpd_style_subclass_df)) + + def test_astype_preserves_subclass(self): + # GH#40810 + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + + result = df.astype({"A": np.int64, "B": np.int32, "C": np.float64}) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_equals_subclass(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]}) + assert df1.equals(df2) + assert df2.equals(df1) + + def test_replace_list_method(self): + # https://github.com/pandas-dev/pandas/pull/46018 + df = tm.SubclassedDataFrame({"A": [0, 1, 2]}) + msg = "The 'method' keyword in SubclassedDataFrame.replace is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=msg, raise_on_extra_warnings=False + ): + result = df.replace([1, 2], method="ffill") + expected = tm.SubclassedDataFrame({"A": [0, 0, 0]}) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + +class MySubclassWithMetadata(DataFrame): + _metadata = ["my_metadata"] + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + my_metadata = kwargs.pop("my_metadata", None) + if args and isinstance(args[0], MySubclassWithMetadata): + my_metadata = args[0].my_metadata # type: ignore[has-type] + self.my_metadata = my_metadata + + @property + def _constructor(self): + return MySubclassWithMetadata + + +def test_constructor_with_metadata(): + # https://github.com/pandas-dev/pandas/pull/54922 + # https://github.com/pandas-dev/pandas/issues/55120 + df = MySubclassWithMetadata( + np.random.default_rng(2).random((5, 3)), columns=["A", "B", "C"] + ) + subset = df[["A", "B"]] + assert isinstance(subset, MySubclassWithMetadata) + + +class SimpleDataFrameSubClass(DataFrame): + """A subclass of DataFrame that does not define a constructor.""" + + +class SimpleSeriesSubClass(Series): + """A subclass of Series that does not define a constructor.""" + + +class TestSubclassWithoutConstructor: + def test_copy_df(self): + expected = DataFrame({"a": [1, 2, 3]}) + result = SimpleDataFrameSubClass(expected).copy() + + assert ( + type(result) is DataFrame + ) # assert_frame_equal only checks isinstance(lhs, type(rhs)) + tm.assert_frame_equal(result, expected) + + def test_copy_series(self): + expected = Series([1, 2, 3]) + result = SimpleSeriesSubClass(expected).copy() + + tm.assert_series_equal(result, expected) + + def test_series_to_frame(self): + orig = Series([1, 2, 3]) + expected = orig.to_frame() + result = SimpleSeriesSubClass(orig).to_frame() + + assert ( + type(result) is DataFrame + ) # assert_frame_equal only checks isinstance(lhs, type(rhs)) + tm.assert_frame_equal(result, expected) + + def test_groupby(self): + df = SimpleDataFrameSubClass(DataFrame({"a": [1, 2, 3]})) + + for _, v in df.groupby("a"): + assert type(v) is DataFrame diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_ufunc.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..88c62da2b0a735b103f7a6b03634aa185fc46d2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_ufunc.py @@ -0,0 +1,311 @@ +from functools import partial +import re + +import numpy as np +import 
pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_extension_array_dtype + +dtypes = [ + "int64", + "Int64", + {"A": "int64", "B": "Int64"}, +] + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_unary(dtype): + # unary input, unary output + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.positive(df) + expected = pd.DataFrame( + np.positive(values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_binary(request, dtype): + # unary input, binary output + if is_extension_array_dtype(dtype) or isinstance(dtype, dict): + request.applymarker( + pytest.mark.xfail( + reason="Extension / mixed with multiple outputs not implemented." + ) + ) + + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result_pandas = np.modf(df) + assert isinstance(result_pandas, tuple) + assert len(result_pandas) == 2 + expected_numpy = np.modf(values) + + for result, b in zip(result_pandas, expected_numpy): + expected = pd.DataFrame(b, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_dispatch_binop(dtype): + # binop ufuncs are dispatched to our dunder methods. + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.add(df, df) + expected = pd.DataFrame( + np.add(values, values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "func,arg,expected", + [ + (np.add, 1, [2, 3, 4, 5]), + ( + partial(np.add, where=[[False, True], [True, False]]), + np.array([[1, 1], [1, 1]]), + [0, 3, 4, 0], + ), + (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]), + (np.subtract, 2, [-1, 0, 1, 2]), + ( + partial(np.negative, where=np.array([[False, True], [True, False]])), + None, + [0, -2, -3, 0], + ), + ], +) +def test_ufunc_passes_args(func, arg, expected): + # GH#40662 + arr = np.array([[1, 2], [3, 4]]) + df = pd.DataFrame(arr) + result_inplace = np.zeros_like(arr) + # 1-argument ufunc + if arg is None: + result = func(df, out=result_inplace) + else: + result = func(df, arg, out=result_inplace) + + expected = np.array(expected).reshape(2, 2) + tm.assert_numpy_array_equal(result_inplace, expected) + + expected = pd.DataFrame(expected) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype_a", dtypes) +@pytest.mark.parametrize("dtype_b", dtypes) +def test_binary_input_aligns_columns(request, dtype_a, dtype_b): + if ( + is_extension_array_dtype(dtype_a) + or isinstance(dtype_a, dict) + or is_extension_array_dtype(dtype_b) + or isinstance(dtype_b, dict) + ): + request.applymarker( + pytest.mark.xfail( + reason="Extension / mixed with multiple inputs not implemented." 
+ ) + ) + + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a) + + if isinstance(dtype_a, dict) and isinstance(dtype_b, dict): + dtype_b = dtype_b.copy() + dtype_b["C"] = dtype_b.pop("B") + df2 = pd.DataFrame({"A": [1, 2], "C": [3, 4]}).astype(dtype_b) + # As of 2.0, align first before applying the ufunc + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3, np.nan], [2, 4, np.nan]]), + np.array([[1, np.nan, 3], [2, np.nan, 4]]), + ) + expected = pd.DataFrame(expected, index=[0, 1], columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + result = np.heaviside(df1, df2.values) + expected = pd.DataFrame([[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_aligns_index(request, dtype): + if is_extension_array_dtype(dtype) or isinstance(dtype, dict): + request.applymarker( + pytest.mark.xfail( + reason="Extension / mixed with multiple inputs not implemented." + ) + ) + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype) + df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype) + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3], [3, 4], [np.nan, np.nan]]), + np.array([[1, 3], [np.nan, np.nan], [3, 4]]), + ) + # TODO(FloatArray): this will be Float64Dtype. + expected = pd.DataFrame(expected, index=["a", "b", "c"], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + result = np.heaviside(df1, df2.values) + expected = pd.DataFrame( + [[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"], index=["a", "b"] + ) + tm.assert_frame_equal(result, expected) + + +def test_binary_frame_series_raises(): + # We don't currently implement + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df, df["A"]) + + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df["A"], df) + + +def test_unary_accumulate_axis(): + # https://github.com/pandas-dev/pandas/issues/39259 + df = pd.DataFrame({"a": [1, 3, 2, 4]}) + result = np.maximum.accumulate(df) + expected = pd.DataFrame({"a": [1, 3, 3, 4]}) + tm.assert_frame_equal(result, expected) + + df = pd.DataFrame({"a": [1, 3, 2, 4], "b": [0.1, 4.0, 3.0, 2.0]}) + result = np.maximum.accumulate(df) + # in theory could preserve int dtype for default axis=0 + expected = pd.DataFrame({"a": [1.0, 3.0, 3.0, 4.0], "b": [0.1, 4.0, 4.0, 4.0]}) + tm.assert_frame_equal(result, expected) + + result = np.maximum.accumulate(df, axis=0) + tm.assert_frame_equal(result, expected) + + result = np.maximum.accumulate(df, axis=1) + expected = pd.DataFrame({"a": [1.0, 3.0, 2.0, 4.0], "b": [1.0, 4.0, 3.0, 4.0]}) + tm.assert_frame_equal(result, expected) + + +def test_frame_outer_disallowed(): + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(NotImplementedError, match=""): + # deprecation enforced in 2.0 + np.subtract.outer(df, df) + + +def test_alignment_deprecation_enforced(): + # Enforced in 2.0 + # https://github.com/pandas-dev/pandas/issues/39184 + df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + s1 = pd.Series([1, 2], index=["a", "b"]) + s2 = pd.Series([1, 2], index=["b", "c"]) + + # binary dataframe / dataframe + expected = pd.DataFrame({"a": [2, 4, 6], "b": [8, 10, 12]}) + + with tm.assert_produces_warning(None): + # aligned -> no warning! 
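+        # (both operands share the same row and column labels, so no realignment occurs)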
+ result = np.add(df1, df1) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, df2.values) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, df2) + expected = pd.DataFrame({"a": [np.nan] * 3, "b": [5, 7, 9], "c": [np.nan] * 3}) + tm.assert_frame_equal(result, expected) + + result = np.add(df1.values, df2) + expected = pd.DataFrame({"b": [2, 4, 6], "c": [8, 10, 12]}) + tm.assert_frame_equal(result, expected) + + # binary dataframe / series + expected = pd.DataFrame({"a": [2, 3, 4], "b": [6, 7, 8]}) + + with tm.assert_produces_warning(None): + # aligned -> no warning! + result = np.add(df1, s1) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, s2.values) + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame( + {"a": [np.nan] * 3, "b": [5.0, 6.0, 7.0], "c": [np.nan] * 3} + ) + result = np.add(df1, s2) + tm.assert_frame_equal(result, expected) + + msg = "Cannot apply ufunc to mixed DataFrame and Series inputs." + with pytest.raises(NotImplementedError, match=msg): + np.add(s2, df1) + + +def test_alignment_deprecation_many_inputs_enforced(): + # Enforced in 2.0 + # https://github.com/pandas-dev/pandas/issues/39184 + # test that the deprecation also works with > 2 inputs -> using a numba + # written ufunc for this because numpy itself doesn't have such ufuncs + numba = pytest.importorskip("numba") + + @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)]) + def my_ufunc(x, y, z): + return x + y + z + + df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + df3 = pd.DataFrame({"a": [1, 2, 3], "c": [4, 5, 6]}) + + result = my_ufunc(df1, df2, df3) + expected = pd.DataFrame(np.full((3, 3), np.nan), columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + # all aligned -> no warning + with tm.assert_produces_warning(None): + result = my_ufunc(df1, df1, df1) + expected = pd.DataFrame([[3.0, 12.0], [6.0, 15.0], [9.0, 18.0]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # mixed frame / arrays + msg = ( + r"operands could not be broadcast together with shapes \(3,3\) \(3,3\) \(3,2\)" + ) + with pytest.raises(ValueError, match=msg): + my_ufunc(df1, df2, df3.values) + + # single frame -> no warning + with tm.assert_produces_warning(None): + result = my_ufunc(df1, df2.values, df3.values) + tm.assert_frame_equal(result, expected) + + # takes indices of first frame + msg = ( + r"operands could not be broadcast together with shapes \(3,2\) \(3,3\) \(3,3\)" + ) + with pytest.raises(ValueError, match=msg): + my_ufunc(df1.values, df2, df3) + + +def test_array_ufuncs_for_many_arguments(): + # GH39853 + def add3(x, y, z): + return x + y + z + + ufunc = np.frompyfunc(add3, 3, 1) + df = pd.DataFrame([[1, 2], [3, 4]]) + + result = ufunc(df, df, 1) + expected = pd.DataFrame([[3, 5], [7, 9]], dtype=object) + tm.assert_frame_equal(result, expected) + + ser = pd.Series([1, 2]) + msg = ( + "Cannot apply ufunc " + "to mixed DataFrame and Series inputs." 
+ ) + with pytest.raises(NotImplementedError, match=re.escape(msg)): + ufunc(df, df, ser) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_unary.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..850c92013694fa6724d0b3450a1f378600eaadab --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_unary.py @@ -0,0 +1,204 @@ +from decimal import Decimal + +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +import pandas as pd +import pandas._testing as tm + + +class TestDataFrameUnaryOperators: + # __pos__, __neg__, __invert__ + + @pytest.mark.parametrize( + "df,expected", + [ + (pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})), + (pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})), + ( + pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}), + pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}), + ), + ], + ) + def test_neg_numeric(self, df, expected): + tm.assert_frame_equal(-df, expected) + tm.assert_series_equal(-df["a"], expected["a"]) + + @pytest.mark.parametrize( + "df, expected", + [ + (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)), + ([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]), + ], + ) + def test_neg_object(self, df, expected): + # GH#21380 + df = pd.DataFrame({"a": df}) + expected = pd.DataFrame({"a": expected}) + tm.assert_frame_equal(-df, expected) + tm.assert_series_equal(-df["a"], expected["a"]) + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": ["a", "b"]}), + pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}), + ], + ) + def test_neg_raises(self, df, using_infer_string): + msg = ( + "bad operand type for unary -: 'str'|" + r"bad operand type for unary -: 'DatetimeArray'" + ) + if using_infer_string and df.dtypes.iloc[0] == "string": + import pyarrow as pa + + msg = "has no kernel" + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + (-df) + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + (-df["a"]) + + else: + with pytest.raises(TypeError, match=msg): + (-df) + with pytest.raises(TypeError, match=msg): + (-df["a"]) + + def test_invert(self, float_frame): + df = float_frame + + tm.assert_frame_equal(-(df < 0), ~(df < 0)) + + def test_invert_mixed(self): + shape = (10, 5) + df = pd.concat( + [ + pd.DataFrame(np.zeros(shape, dtype="bool")), + pd.DataFrame(np.zeros(shape, dtype=int)), + ], + axis=1, + ignore_index=True, + ) + result = ~df + expected = pd.concat( + [ + pd.DataFrame(np.ones(shape, dtype="bool")), + pd.DataFrame(-np.ones(shape, dtype=int)), + ], + axis=1, + ignore_index=True, + ) + tm.assert_frame_equal(result, expected) + + def test_invert_empty_not_input(self): + # GH#51032 + df = pd.DataFrame() + result = ~df + tm.assert_frame_equal(df, result) + assert df is not result + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": [-1, 1]}), + pd.DataFrame({"a": [False, True]}), + pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}), + ], + ) + def test_pos_numeric(self, df): + # GH#16073 + tm.assert_frame_equal(+df, df) + tm.assert_series_equal(+df["a"], df["a"]) + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": np.array([-1, 2], dtype=object)}), + pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}), + ], + ) + def test_pos_object(self, df): + # GH#21380 + tm.assert_frame_equal(+df, df) + tm.assert_series_equal(+df["a"], df["a"]) 
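+    # NOTE: numpy >= 1.25 raises TypeError for unary + on object-dtype strings;
+    # the next test branches on np_version_gte1p25 to cover both behaviors.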
+ + @pytest.mark.parametrize( + "df", + [ + pytest.param( + pd.DataFrame({"a": ["a", "b"]}), + # filterwarnings removable once min numpy version is 1.25 + marks=[ + pytest.mark.filterwarnings("ignore:Applying:DeprecationWarning") + ], + ), + ], + ) + def test_pos_object_raises(self, df): + # GH#21380 + if np_version_gte1p25: + with pytest.raises( + TypeError, match=r"^bad operand type for unary \+: \'str\'$" + ): + tm.assert_frame_equal(+df, df) + else: + tm.assert_series_equal(+df["a"], df["a"]) + + @pytest.mark.parametrize( + "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})] + ) + def test_pos_raises(self, df): + msg = r"bad operand type for unary \+: 'DatetimeArray'" + with pytest.raises(TypeError, match=msg): + (+df) + with pytest.raises(TypeError, match=msg): + (+df["a"]) + + def test_unary_nullable(self): + df = pd.DataFrame( + { + "a": pd.array([1, -2, 3, pd.NA], dtype="Int64"), + "b": pd.array([4.0, -5.0, 6.0, pd.NA], dtype="Float32"), + "c": pd.array([True, False, False, pd.NA], dtype="boolean"), + # include numpy bool to make sure bool-vs-boolean behavior + # is consistent in non-NA locations + "d": np.array([True, False, False, True]), + } + ) + + result = +df + res_ufunc = np.positive(df) + expected = df + # TODO: assert that we have copies? + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) + + result = -df + res_ufunc = np.negative(df) + expected = pd.DataFrame( + { + "a": pd.array([-1, 2, -3, pd.NA], dtype="Int64"), + "b": pd.array([-4.0, 5.0, -6.0, pd.NA], dtype="Float32"), + "c": pd.array([False, True, True, pd.NA], dtype="boolean"), + "d": np.array([False, True, True, False]), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) + + result = abs(df) + res_ufunc = np.abs(df) + expected = pd.DataFrame( + { + "a": pd.array([1, 2, 3, pd.NA], dtype="Int64"), + "b": pd.array([4.0, 5.0, 6.0, pd.NA], dtype="Float32"), + "c": pd.array([True, False, False, pd.NA], dtype="boolean"), + "d": np.array([True, False, False, True]), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/frame/test_validate.py b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_validate.py new file mode 100644 index 0000000000000000000000000000000000000000..e99e0a686384883d570feef949597d08da7e8ff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/frame/test_validate.py @@ -0,0 +1,41 @@ +import pytest + +from pandas.core.frame import DataFrame + + +@pytest.fixture +def dataframe(): + return DataFrame({"a": [1, 2], "b": [3, 4]}) + + +class TestDataFrameValidate: + """Tests for error handling related to data types of method arguments.""" + + @pytest.mark.parametrize( + "func", + [ + "query", + "eval", + "set_index", + "reset_index", + "dropna", + "drop_duplicates", + "sort_values", + ], + ) + @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, dataframe, func, inplace): + msg = 'For argument "inplace" expected type bool' + kwargs = {"inplace": inplace} + + if func == "query": + kwargs["expr"] = "a > b" + elif func == "eval": + kwargs["expr"] = "a + b" + elif func == "set_index": + kwargs["keys"] = ["a"] + elif func == "sort_values": + kwargs["by"] = ["a"] + + with pytest.raises(ValueError, match=msg): + getattr(dataframe, func)(**kwargs) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_aggregation.py 
b/venv/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7695c953712ed9925e4e804d0db1e8cf606a97eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pytest
+
+from pandas.core.apply import (
+    _make_unique_kwarg_list,
+    maybe_mangle_lambdas,
+)
+
+
+def test_maybe_mangle_lambdas_passthrough():
+    assert maybe_mangle_lambdas("mean") == "mean"
+    assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
+    # don't mangle single lambda.
+    assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
+
+
+def test_maybe_mangle_lambdas_listlike():
+    aggfuncs = [lambda x: 1, lambda x: 2]
+    result = maybe_mangle_lambdas(aggfuncs)
+    assert result[0].__name__ == "<lambda_0>"
+    assert result[1].__name__ == "<lambda_1>"
+    assert aggfuncs[0](None) == result[0](None)
+    assert aggfuncs[1](None) == result[1](None)
+
+
+def test_maybe_mangle_lambdas():
+    func = {"A": [lambda x: 0, lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+
+def test_maybe_mangle_lambdas_args():
+    func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+    assert func["A"][0](0, 1) == (0, 1, 1)
+    assert func["A"][0](0, 1, 2) == (0, 1, 2)
+    assert func["A"][0](0, 2, b=3) == (0, 2, 3)
+
+
+def test_maybe_mangle_lambdas_named():
+    func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
+    result = maybe_mangle_lambdas(func)
+    assert result == func
+
+
+@pytest.mark.parametrize(
+    "order, expected_reorder",
+    [
+        (
+            [
+                ("height", "<lambda>"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>"),
+                ("weight", "<lambda>"),
+            ],
+            [
+                ("height", "<lambda>_0"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>_1"),
+                ("weight", "<lambda>"),
+            ],
+        ),
+        (
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+            ],
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>_0"),
+                ("col1", "<lambda>_1"),
+                ("col1", "<lambda>_2"),
+            ],
+        ),
+        (
+            [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+            [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+        ),
+    ],
+)
+def test_make_unique(order, expected_reorder):
+    # GH 27519, test if make_unique function reorders correctly
+    result = _make_unique_kwarg_list(order)
+
+    assert result == expected_reorder
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_algos.py b/venv/lib/python3.10/site-packages/pandas/tests/test_algos.py
new file mode 100644
index 0000000000000000000000000000000000000000..718d1b3ee2e834507919cd1e46b2e2bead191589
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/test_algos.py
@@ -0,0 +1,2041 @@
+from datetime import datetime
+import struct
+
+import numpy as np
+import pytest
+
+from pandas._libs import (
+    algos as libalgos,
+    hashtable as ht,
+)
+
+from pandas.core.dtypes.common import (
+    is_bool_dtype,
+    is_complex_dtype,
+    is_float_dtype,
+    is_integer_dtype,
+    is_object_dtype,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    CategoricalIndex,
+    DataFrame,
+    DatetimeIndex,
+    Index,
+    IntervalIndex,
+    MultiIndex,
+    NaT,
+    Period,
+    PeriodIndex,
+    Series,
+    Timedelta,
+    Timestamp,
+    cut,
+    date_range,
+    timedelta_range,
+    to_datetime,
+    to_timedelta,
+)
+import pandas._testing as tm
+import pandas.core.algorithms as algos
+from pandas.core.arrays import (
+    DatetimeArray,
+    TimedeltaArray,
+)
+import pandas.core.common as com + + +class TestFactorize: + def test_factorize_complex(self): + # GH#17927 + array = [1, 2, 2 + 1j] + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + labels, uniques = algos.factorize(array) + + expected_labels = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(labels, expected_labels) + + # Should return a complex dtype in the future + expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize(self, index_or_series_obj, sort): + obj = index_or_series_obj + result_codes, result_uniques = obj.factorize(sort=sort) + + constructor = Index + if isinstance(obj, MultiIndex): + constructor = MultiIndex.from_tuples + expected_arr = obj.unique() + if expected_arr.dtype == np.float16: + expected_arr = expected_arr.astype(np.float32) + expected_uniques = constructor(expected_arr) + if ( + isinstance(obj, Index) + and expected_uniques.dtype == bool + and obj.dtype == object + ): + expected_uniques = expected_uniques.astype(object) + + if sort: + expected_uniques = expected_uniques.sort_values() + + # construct an integer ndarray so that + # `expected_uniques.take(expected_codes)` is equal to `obj` + expected_uniques_list = list(expected_uniques) + expected_codes = [expected_uniques_list.index(val) for val in obj] + expected_codes = np.asarray(expected_codes, dtype=np.intp) + + tm.assert_numpy_array_equal(result_codes, expected_codes) + tm.assert_index_equal(result_uniques, expected_uniques, exact=True) + + def test_series_factorize_use_na_sentinel_false(self): + # GH#35667 + values = np.array([1, 2, 1, np.nan]) + ser = Series(values) + codes, uniques = ser.factorize(use_na_sentinel=False) + + expected_codes = np.array([0, 1, 0, 2], dtype=np.intp) + expected_uniques = Index([1.0, 2.0, np.nan]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_basic(self): + items = np.array(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object) + codes, uniques = algos.factorize(items) + tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object)) + + codes, uniques = algos.factorize(items, sort=True) + exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5, dtype=np.intp)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5.0)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype) + 
tm.assert_numpy_array_equal(uniques, exp) + + def test_mixed(self): + # doc example reshaping.rst + x = Series(["A", "A", np.nan, "B", 3.14, np.inf]) + codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index(["A", "B", 3.14, np.inf]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index([3.14, np.inf, "A", "B"]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_datetime64(self): + # M8 + v1 = Timestamp("20130101 09:00:00.00004") + v2 = Timestamp("20130101") + x = Series([v1, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v1, v2]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v2, v1]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_period(self): + # period + v1 = Period("201302", freq="M") + v2 = Period("201303", freq="M") + x = Series([v1, v1, v1, v2, v2, v1]) + + # periods are not 'sorted' as they are converted back into an index + codes, uniques = algos.factorize(x) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + def test_factorize_timedelta(self): + # GH 5986 + v1 = to_timedelta("1 day 1 min") + v2 = to_timedelta("1 day") + x = Series([v1, v2, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v2, v1])) + + def test_factorize_nan(self): + # nan should map to na_sentinel, not reverse_indexer[na_sentinel] + # rizer.factorize should not raise an exception if na_sentinel indexes + # outside of reverse_indexer + key = np.array([1, 2, 1, np.nan], dtype="O") + rizer = ht.ObjectFactorizer(len(key)) + for na_sentinel in (-1, 20): + ids = rizer.factorize(key, na_sentinel=na_sentinel) + expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp) + assert len(set(key)) == len(set(expected)) + tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) + tm.assert_numpy_array_equal(ids, expected) + + def test_factorizer_with_mask(self): + # GH#49549 + data = np.array([1, 2, 3, 1, 1, 0], dtype="int64") + mask = np.array([False, False, False, False, False, True]) + rizer = ht.Int64Factorizer(len(data)) + result = rizer.factorize(data, mask=mask) + expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + expected_uniques = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + def test_factorizer_object_with_nan(self): + # GH#49549 + data = np.array([1, 2, 3, 1, np.nan]) + rizer = 
ht.ObjectFactorizer(len(data)) + result = rizer.factorize(data.astype(object)) + expected = np.array([0, 1, 2, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + expected_uniques = np.array([1, 2, 3], dtype=object) + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), "nonsense"], + ), + ( + [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), (1, 2, 3)], + ), + ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]), + ], + ) + def test_factorize_tuple_list(self, data, expected_codes, expected_uniques): + # GH9454 + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = pd.factorize(data) + + tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp)) + + expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques_array) + + def test_complex_sorting(self): + # gh 12666 - check no segfault + x17 = np.array([complex(i) for i in range(17)], dtype=object) + + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + algos.factorize(x17[::-1], sort=True) + + def test_numeric_dtype_factorize(self, any_real_numpy_dtype): + # GH41132 + dtype = any_real_numpy_dtype + data = np.array([1, 2, 2, 1], dtype=dtype) + expected_codes = np.array([0, 1, 1, 0], dtype=np.intp) + expected_uniques = np.array([1, 2], dtype=dtype) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_float64_factorize(self, writable): + data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp) + expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_uint64_factorize(self, writable): + data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_int64_factorize(self, writable): + data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_string_factorize(self, writable): + data = np.array(["a", "c", "a", "b", "c"], dtype=object) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def 
test_object_factorize(self, writable): + data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object) + data.setflags(write=writable) + expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_datetime64_factorize(self, writable): + # GH35650 Verify whether read-only datetime64 array can be factorized + data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]") + data.setflags(write=writable) + expected_codes = np.array([0], dtype=np.intp) + expected_uniques = np.array( + ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]" + ) + + codes, uniques = pd.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex(self, sort): + # increasing -> sort doesn't matter + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + result = algos.factorize(ri, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex_decreasing(self, sort): + # decreasing -> sort matters + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + ri2 = ri[::-1] + expected = expected[0], ri2 + if sort: + expected = expected[0][::-1], expected[1][::-1] + + result = algos.factorize(ri2, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri2.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + def test_deprecate_order(self): + # gh 19727 - check warning is raised for deprecated keyword, order. + # Test not valid once order keyword is removed. + data = np.array([2**63, 1, 2**63], dtype=np.uint64) + with pytest.raises(TypeError, match="got an unexpected keyword"): + algos.factorize(data, order=True) + with tm.assert_produces_warning(False): + algos.factorize(data) + + @pytest.mark.parametrize( + "data", + [ + np.array([0, 1, 0], dtype="u8"), + np.array([-(2**63), 1, -(2**63)], dtype="i8"), + np.array(["__nan__", "foo", "__nan__"], dtype="object"), + ], + ) + def test_parametrized_factorize_na_value_default(self, data): + # arrays that include the NA default for that type, but isn't used. 
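+        # (values equal to that NA default should still be treated as ordinary, non-NA entries)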
+ codes, uniques = algos.factorize(data) + expected_uniques = data[[0, 1]] + expected_codes = np.array([0, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, na_value", + [ + (np.array([0, 1, 0, 2], dtype="u8"), 0), + (np.array([1, 0, 1, 2], dtype="u8"), 1), + (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)), + (np.array([1, -(2**63), 1, 0], dtype="i8"), 1), + (np.array(["a", "", "a", "b"], dtype=object), "a"), + (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()), + (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)), + ], + ) + def test_parametrized_factorize_na_value(self, data, na_value): + codes, uniques = algos.factorize_array(data, na_value=na_value) + expected_uniques = data[[1, 3]] + expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize( + "data, uniques", + [ + ( + np.array(["b", "a", None, "b"], dtype=object), + np.array(["b", "a"], dtype=object), + ), + ( + pd.array([2, 1, np.nan, 2], dtype="Int64"), + pd.array([2, 1], dtype="Int64"), + ), + ], + ids=["numpy_array", "extension_array"], + ) + def test_factorize_use_na_sentinel(self, sort, data, uniques): + codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True) + if sort: + expected_codes = np.array([1, 0, -1, 1], dtype=np.intp) + expected_uniques = algos.safe_sort(uniques) + else: + expected_codes = np.array([0, 1, -1, 0], dtype=np.intp) + expected_uniques = uniques + tm.assert_numpy_array_equal(codes, expected_codes) + if isinstance(data, np.ndarray): + tm.assert_numpy_array_equal(uniques, expected_uniques) + else: + tm.assert_extension_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + ["a", None, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ( + ["a", np.nan, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ], + ) + def test_object_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize( + np.array(data, dtype=object), use_na_sentinel=False + ) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [1, None, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype="O"), + ), + ( + [1, np.nan, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype=np.float64), + ), + ], + ) + def test_int_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = algos.factorize(data, use_na_sentinel=False) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + Index(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + 
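# Sketch of the use_na_sentinel=False behavior checked above, assuming
# pandas 2.x: NaN gets an ordinary code and shows up in uniques instead of
# being mapped to -1.
import numpy as np
import pandas as pd

codes, uniques = pd.factorize(
    np.array(["a", np.nan, "b", "a"], dtype=object), use_na_sentinel=False
)
assert codes.tolist() == [0, 1, 2, 0]
assert uniques[0] == "a" and np.isnan(uniques[1]) and uniques[2] == "b"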
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")), + np.array([0, 0], dtype=np.intp), + DatetimeIndex(["2017"], tz="US/Eastern"), + ), + ], + ) + def test_factorize_mixed_values(self, data, expected_codes, expected_uniques): + # GH 19721 + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_factorize_interval_non_nano(self, unit): + # GH#56099 + left = DatetimeIndex(["2016-01-01", np.nan, "2015-10-11"]).as_unit(unit) + right = DatetimeIndex(["2016-01-02", np.nan, "2015-10-15"]).as_unit(unit) + idx = IntervalIndex.from_arrays(left, right) + codes, cats = idx.factorize() + assert cats.dtype == f"interval[datetime64[{unit}], right]" + + ts = Timestamp(0).as_unit(unit) + idx2 = IntervalIndex.from_arrays(left - ts, right - ts) + codes2, cats2 = idx2.factorize() + assert cats2.dtype == f"interval[timedelta64[{unit}], right]" + + idx3 = IntervalIndex.from_arrays( + left.tz_localize("US/Pacific"), right.tz_localize("US/Pacific") + ) + codes3, cats3 = idx3.factorize() + assert cats3.dtype == f"interval[datetime64[{unit}, US/Pacific], right]" + + +class TestUnique: + def test_ints(self): + arr = np.random.default_rng(2).integers(0, 100, size=50) + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_objects(self): + arr = np.random.default_rng(2).integers(0, 100, size=50).astype("O") + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_object_refcount_bug(self): + lst = np.array(["A", "B", "C", "D", "E"], dtype=object) + for i in range(1000): + len(algos.unique(lst)) + + def test_on_index_object(self): + mindex = MultiIndex.from_arrays( + [np.arange(5).repeat(5), np.tile(np.arange(5), 5)] + ) + expected = mindex.values + expected.sort() + + mindex = mindex.repeat(2) + + result = pd.unique(mindex) + result.sort() + + tm.assert_almost_equal(result, expected) + + def test_dtype_preservation(self, any_numpy_dtype): + # GH 15442 + if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES): + data = [1, 2, 2] + uniques = [1, 2] + elif is_integer_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1, 2] + elif is_float_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1.0, 2.0] + elif is_complex_dtype(any_numpy_dtype): + data = [complex(1, 0), complex(2, 0), complex(2, 0)] + uniques = [complex(1, 0), complex(2, 0)] + elif is_bool_dtype(any_numpy_dtype): + data = [True, True, False] + uniques = [True, False] + elif is_object_dtype(any_numpy_dtype): + data = ["A", "B", "B"] + uniques = ["A", "B"] + else: + # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere + data = [1, 2, 2] + uniques = [1, 2] + + result = Series(data, dtype=any_numpy_dtype).unique() + expected = np.array(uniques, dtype=any_numpy_dtype) + + if any_numpy_dtype in tm.STRING_DTYPES: + expected = expected.astype(object) + + if expected.dtype.kind in ["m", "M"]: + # We get TimedeltaArray/DatetimeArray + assert isinstance(result, (DatetimeArray, TimedeltaArray)) + result = np.array(result) + tm.assert_numpy_array_equal(result, expected) + + def test_datetime64_dtype_array_returned(self): + # GH 9431 + expected = np.array( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ], + dtype="M8[ns]", + 
) + + dt_index = to_datetime( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ] + ) + result = algos.unique(dt_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(dt_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_datetime_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]") + result = pd.unique(a) + expected = np.array(["2000", "2001"], dtype="datetime64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]") + result = pd.unique(a) + expected = np.array([2000, 2001], dtype="timedelta64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta64_dtype_array_returned(self): + # GH 9431 + expected = np.array([31200, 45678, 10000], dtype="m8[ns]") + + td_index = to_timedelta([31200, 45678, 31200, 10000, 45678]) + result = algos.unique(td_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(td_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_uint64_overflow(self): + s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) + exp = np.array([1, 2, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.unique(s), exp) + + def test_nan_in_object_array(self): + duplicated_items = ["a", np.nan, "c", "c"] + result = pd.unique(np.array(duplicated_items, dtype=object)) + expected = np.array(["a", np.nan, "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_categorical(self): + # we are expecting to return in the order + # of appearance + expected = Categorical(list("bac")) + + # we are expecting to return in the order + # of the categories + expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True) + + # GH 15939 + c = Categorical(list("baabc")) + result = c.unique() + tm.assert_categorical_equal(result, expected) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected) + + c = Categorical(list("baabc"), ordered=True) + result = c.unique() + tm.assert_categorical_equal(result, expected_o) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected_o) + + # Series of categorical dtype + s = Series(Categorical(list("baabc")), name="foo") + result = s.unique() + tm.assert_categorical_equal(result, expected) + + result = pd.unique(s) + tm.assert_categorical_equal(result, expected) + + # CI -> return CI + ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc"))) + expected = CategoricalIndex(expected) + result = ci.unique() + tm.assert_index_equal(result, expected) + + result = pd.unique(ci) + tm.assert_index_equal(result, expected) + + def test_datetime64tz_aware(self, unit): + # GH 15939 + + dti = Index( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + ser = Series(dti) + + result = ser.unique() + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = 
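# Sketch of the tz-aware path test_datetime64tz_aware walks through,
# assuming pandas 2.x: unique on tz-aware data returns a DatetimeArray
# that keeps the timezone instead of a naive datetime64 ndarray.
import pandas as pd

dti = pd.to_datetime(["2016-01-01", "2016-01-01"]).tz_localize("US/Eastern")
result = pd.Series(dti).unique()
assert len(result) == 1
assert str(result.dtype) == "datetime64[ns, US/Eastern]"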
dti.unique() + expected = dti[:1] + tm.assert_index_equal(result, expected) + + result = pd.unique(ser) + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = pd.unique(dti) + expected = dti[:1] + tm.assert_index_equal(result, expected) + + def test_order_of_appearance(self): + # 9346 + # light testing of guarantee of order of appearance + # these also are the doc-examples + result = pd.unique(Series([2, 1, 3, 3])) + tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64")) + + result = pd.unique(Series([2] + [1] * 5)) + tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64")) + + msg = "unique with argument that is not not a Series, Index," + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(list("aabc")) + expected = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(Series(Categorical(list("aabc")))) + expected = Categorical(list("abc")) + tm.assert_categorical_equal(result, expected) + + def test_order_of_appearance_dt64(self, unit): + ser = Series([Timestamp("20160101"), Timestamp("20160101")]).dt.as_unit(unit) + result = pd.unique(ser) + expected = np.array(["2016-01-01T00:00:00.000000000"], dtype=f"M8[{unit}]") + tm.assert_numpy_array_equal(result, expected) + + def test_order_of_appearance_dt64tz(self, unit): + dti = DatetimeIndex( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + result = pd.unique(dti) + expected = DatetimeIndex( + ["2016-01-01 00:00:00"], dtype=f"datetime64[{unit}, US/Eastern]", freq=None + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "arg ,expected", + [ + (("1", "1", "2"), np.array(["1", "2"], dtype=object)), + (("foo",), np.array(["foo"], dtype=object)), + ], + ) + def test_tuple_with_strings(self, arg, expected): + # see GH 17108 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arg) + tm.assert_numpy_array_equal(result, expected) + + def test_obj_none_preservation(self): + # GH 20866 + arr = np.array(["foo", None], dtype=object) + result = pd.unique(arr) + expected = np.array(["foo", None], dtype=object) + + tm.assert_numpy_array_equal(result, expected, strict_nan=True) + + def test_signed_zero(self): + # GH 21866 + a = np.array([-0.0, 0.0]) + result = pd.unique(a) + expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent + tm.assert_numpy_array_equal(result, expected) + + def test_different_nans(self): + # GH 21866 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent + result = pd.unique(a) + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("el_type", [np.float64, object]) + def test_first_nan_kept(self, el_type): + # GH 22295 + # create different nans from bit-patterns: + bits_for_nan1 = 0xFFF8000000000001 + bits_for_nan2 = 0x7FF8000000000001 + NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2], dtype=el_type) + result = pd.unique(a) + assert result.size == 1 + # use bit patterns to identify 
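# Sketch of the bit-pattern trick used above, assuming IEEE-754 doubles:
# two NaNs built from different payloads are distinct objects, yet
# pd.unique collapses them to a single NaN.
import struct
import numpy as np
import pandas as pd

NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
result = pd.unique(np.array([NAN1, NAN2]))
assert result.size == 1 and np.isnan(result[0])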
which nan was kept: + result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0] + assert result_nan_bits == bits_for_nan1 + + def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2): + # GH 22295 + if unique_nulls_fixture is unique_nulls_fixture2: + return # skip it, values not unique + a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) + result = pd.unique(a) + assert result.size == 2 + assert a[0] is unique_nulls_fixture + assert a[1] is unique_nulls_fixture2 + + def test_unique_masked(self, any_numeric_ea_dtype): + # GH#48019 + ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype) + result = pd.unique(ser) + expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_nunique_ints(index_or_series_or_array): + # GH#36327 + values = index_or_series_or_array(np.random.default_rng(2).integers(0, 20, 30)) + result = algos.nunique_ints(values) + expected = len(algos.unique(values)) + assert result == expected + + +class TestIsin: + def test_invalid(self): + msg = ( + r"only list-like objects are allowed to be passed to isin\(\), " + r"you passed a `int`" + ) + with pytest.raises(TypeError, match=msg): + algos.isin(1, 1) + with pytest.raises(TypeError, match=msg): + algos.isin(1, [1]) + with pytest.raises(TypeError, match=msg): + algos.isin([1], 1) + + def test_basic(self): + msg = "isin with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin([1, 2], [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(np.array([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), Series([1])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), {1}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], ["a"]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), Series(["a"])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), {"a"}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], [1]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_i8(self): + arr = date_range("20130101", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, set(arr[0:2])) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + arr = timedelta_range("1 day", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = 
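# Sketch of the basic isin contract TestIsin starts from, assuming pandas
# 2.x: elementwise membership against any list-like (list, set, Series),
# returned as a boolean mask of the same length.
import pandas as pd

mask = pd.Series([1, 2]).isin({1})
assert mask.tolist() == [True, False]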
np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, set(arr[0:2])) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"]) + @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"]) + def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1): + # Anything but object and we get all-False shortcut + + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype1) + + comps = arr.view("i8").astype(dtype) + + result = algos.isin(comps, arr) + expected = np.zeros(comps.shape, dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_large(self): + s = date_range("20000101", periods=2000000, freq="s").values + result = algos.isin(s, s[0:2]) + expected = np.zeros(len(s), dtype=bool) + expected[0] = True + expected[1] = True + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"]) + def test_isin_datetimelike_all_nat(self, dtype): + # GH#56427 + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype) + + arr[0] = NaT + result = algos.isin(arr, [NaT]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]"]) + def test_isin_datetimelike_strings_deprecated(self, dtype): + # GH#53111 + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype) + + vals = [str(x) for x in arr] + msg = "The behavior of 'isin' with dtype=.* is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = algos.isin(arr, vals) + assert res.all() + + vals2 = np.array(vals, dtype=str) + with tm.assert_produces_warning(FutureWarning, match=msg): + res2 = algos.isin(arr, vals2) + assert res2.all() + + def test_isin_dt64tz_with_nat(self): + # the all-NaT values used to get inferred to tznaive, which was evaluated + # as non-matching GH#56427 + dti = date_range("2016-01-01", periods=3, tz="UTC") + ser = Series(dti) + ser[0] = NaT + + res = algos.isin(ser._values, [NaT]) + exp = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(res, exp) + + def test_categorical_from_codes(self): + # GH 16639 + vals = np.array([0, 1, 2, 0]) + cats = ["a", "b", "c"] + Sd = Series(Categorical([1]).from_codes(vals, cats)) + St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats)) + expected = np.array([True, True, False, True]) + result = algos.isin(Sd, St) + tm.assert_numpy_array_equal(expected, result) + + def test_categorical_isin(self): + vals = np.array([0, 1, 2, 0]) + cats = ["a", "b", "c"] + cat = Categorical([1]).from_codes(vals, cats) + other = Categorical([1]).from_codes(np.array([0, 1]), cats) + + expected = np.array([True, True, False, True]) + result = algos.isin(cat, other) + tm.assert_numpy_array_equal(expected, result) + + def test_same_nan_is_in(self): + # GH 22160 + # nan is special, because from " a is b" doesn't follow "a == b" + # at least, isin() should follow python's "np.nan in [nan] == True" + # casting to -> np.float64 -> another float-object somewhere on + # the way could lead jeopardize this behavior + comps = [np.nan] # could be casted to float64 + values = [np.nan] + expected = np.array([True]) + msg = "isin with argument that is not not a Series" + with 
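# Sketch of the NaN rule test_same_nan_is_in pins down: even though
# np.nan != np.nan, isin matches NaN against NaN, mirroring Python's
# "np.nan in [np.nan]" being True.
import numpy as np
import pandas as pd

mask = pd.Series([np.nan, 1.0]).isin([np.nan])
assert mask.tolist() == [True, False]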
tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(comps, values) + tm.assert_numpy_array_equal(expected, result) + + def test_same_nan_is_in_large(self): + # https://github.com/pandas-dev/pandas/issues/22205 + s = np.tile(1.0, 1_000_001) + s[0] = np.nan + result = algos.isin(s, np.array([np.nan, 1])) + expected = np.ones(len(s), dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_same_nan_is_in_large_series(self): + # https://github.com/pandas-dev/pandas/issues/22205 + s = np.tile(1.0, 1_000_001) + series = Series(s) + s[0] = np.nan + result = series.isin(np.array([np.nan, 1])) + expected = Series(np.ones(len(s), dtype=bool)) + tm.assert_series_equal(result, expected) + + def test_same_object_is_in(self): + # GH 22160 + # there could be special treatment for nans + # the user however could define a custom class + # with similar behavior, then we at least should + # fall back to usual python's behavior: "a in [a] == True" + class LikeNan: + def __eq__(self, other) -> bool: + return False + + def __hash__(self): + return 0 + + a, b = LikeNan(), LikeNan() + + msg = "isin with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + # same object -> True + tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True])) + # different objects -> False + tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False])) + + def test_different_nans(self): + # GH 22160 + # all nans are handled as equivalent + + comps = [float("nan")] + values = [float("nan")] + assert comps[0] is not values[0] # different nan-objects + + # as list of python-objects: + result = algos.isin(np.array(comps), values) + tm.assert_numpy_array_equal(np.array([True]), result) + + # as object-array: + result = algos.isin( + np.asarray(comps, dtype=object), np.asarray(values, dtype=object) + ) + tm.assert_numpy_array_equal(np.array([True]), result) + + # as float64-array: + result = algos.isin( + np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64) + ) + tm.assert_numpy_array_equal(np.array([True]), result) + + def test_no_cast(self): + # GH 22160 + # ensure 42 is not casted to a string + comps = ["ss", 42] + values = ["42"] + expected = np.array([False, False]) + msg = "isin with argument that is not not a Series, Index" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(comps, values) + tm.assert_numpy_array_equal(expected, result) + + @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])]) + def test_empty(self, empty): + # see gh-16991 + vals = Index(["a", "b"]) + expected = np.array([False, False]) + + result = algos.isin(vals, empty) + tm.assert_numpy_array_equal(expected, result) + + def test_different_nan_objects(self): + # GH 22119 + comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object) + vals = np.array([float("nan")], dtype=object) + expected = np.array([False, False, True]) + result = algos.isin(comps, vals) + tm.assert_numpy_array_equal(expected, result) + + def test_different_nans_as_float64(self): + # GH 21866 + # create different nans from bit-patterns, + # these nans will land in different buckets in the hash-table + # if no special care is taken + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + + # check that NAN1 and NAN2 are equivalent: + arr = np.array([NAN1, NAN2], dtype=np.float64) + lookup1 = 
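# Sketch of the no-casting rule test_no_cast checks, assuming pandas 2.x:
# isin never coerces across types, so the int 42 does not match the
# string "42".
import pandas as pd

mask = pd.Series(["ss", 42]).isin(["42"])
assert mask.tolist() == [False, False]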
np.array([NAN1], dtype=np.float64) + result = algos.isin(arr, lookup1) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + lookup2 = np.array([NAN2], dtype=np.float64) + result = algos.isin(arr, lookup2) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_int_df_string_search(self): + """Comparing df with int`s (1,2) with a string at isin() ("1") + -> should not match values because int 1 is not equal str 1""" + df = DataFrame({"values": [1, 2]}) + result = df.isin(["1"]) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_nan_df_string_search(self): + """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN") + -> should not match values because np.nan is not equal str NaN""" + df = DataFrame({"values": [np.nan, 2]}) + result = df.isin(np.array(["NaN"], dtype=object)) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_float_df_string_search(self): + """Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245") + -> should not match values because float 1.4245 is not equal str 1.4245""" + df = DataFrame({"values": [1.4245, 2.32441]}) + result = df.isin(np.array(["1.4245"], dtype=object)) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_unsigned_dtype(self): + # GH#46485 + ser = Series([1378774140726870442], dtype=np.uint64) + result = ser.isin([1378774140726870528]) + expected = Series(False) + tm.assert_series_equal(result, expected) + + +class TestValueCounts: + def test_value_counts(self): + arr = np.random.default_rng(1234).standard_normal(4) + factor = cut(arr, 4) + + # assert isinstance(factor, n) + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(factor) + breaks = [-1.606, -1.018, -0.431, 0.155, 0.741] + index = IntervalIndex.from_breaks(breaks).astype(CategoricalDtype(ordered=True)) + expected = Series([1, 0, 2, 1], index=index, name="count") + tm.assert_series_equal(result.sort_index(), expected.sort_index()) + + def test_value_counts_bins(self): + s = [1, 2, 3, 4] + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(s, bins=1) + expected = Series( + [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), name="count" + ) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(s, bins=2, sort=False) + expected = Series( + [2, 2], + index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]), + name="count", + ) + tm.assert_series_equal(result, expected) + + def test_value_counts_dtypes(self): + msg2 = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(np.array([1, 1.0])) + assert len(result) == 1 + + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(np.array([1, 1.0]), bins=1) + assert len(result) == 1 + + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(Series([1, 1.0, "1"])) # object + assert len(result) == 2 + + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, 
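# Sketch of the binning behavior test_value_counts_bins covers, assuming
# pandas 2.x: with bins=..., numeric data is cut into intervals and counts
# are reported per interval under the result name "count".
import pandas as pd

result = pd.Series([1, 2, 3, 4]).value_counts(bins=2, sort=False)
assert result.tolist() == [2, 2]
assert result.name == "count"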
match=msg2): + algos.value_counts(np.array(["1", 1], dtype=object), bins=1) + + def test_value_counts_nat(self): + td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]") + dt = to_datetime(["NaT", "2014-01-01"]) + + msg = "pandas.value_counts is deprecated" + + for ser in [td, dt]: + with tm.assert_produces_warning(FutureWarning, match=msg): + vc = algos.value_counts(ser) + vc_with_na = algos.value_counts(ser, dropna=False) + assert len(vc) == 1 + assert len(vc_with_na) == 2 + + exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_dt = algos.value_counts(dt) + tm.assert_series_equal(result_dt, exp_dt) + + exp_td = Series({np.timedelta64(10000): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_td = algos.value_counts(td) + tm.assert_series_equal(result_td, exp_td) + + @pytest.mark.parametrize("dtype", [object, "M8[us]"]) + def test_value_counts_datetime_outofbounds(self, dtype): + # GH 13663 + ser = Series( + [ + datetime(3000, 1, 1), + datetime(5000, 1, 1), + datetime(5000, 1, 1), + datetime(6000, 1, 1), + datetime(3000, 1, 1), + datetime(3000, 1, 1), + ], + dtype=dtype, + ) + res = ser.value_counts() + + exp_index = Index( + [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)], + dtype=dtype, + ) + exp = Series([3, 2, 1], index=exp_index, name="count") + tm.assert_series_equal(res, exp) + + def test_categorical(self): + s = Series(Categorical(list("aaabbc"))) + result = s.value_counts() + expected = Series( + [3, 2, 1], index=CategoricalIndex(["a", "b", "c"]), name="count" + ) + + tm.assert_series_equal(result, expected, check_index_type=True) + + # preserve order? + s = s.cat.as_ordered() + result = s.value_counts() + expected.index = expected.index.as_ordered() + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_nans(self): + s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]), name="count" + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + # out of order + s = Series( + Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"]) + ) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex( + ["a", "b", "c"], + categories=["b", "a", "c"], + ordered=True, + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], + index=CategoricalIndex( + ["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_zeroes(self): + # keep the `d` category with 0 + s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True)) + result = s.value_counts() + expected = Series( + [3, 2, 1, 0], + index=Categorical( + ["b", "a", "c", "d"], categories=list("abcd"), ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_value_counts_dropna(self): + # 
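# Sketch of the zero-count rule test_categorical_zeroes targets, assuming
# pandas 2.x: value_counts on categorical data reports unobserved
# categories with a count of 0 rather than dropping them.
import pandas as pd

ser = pd.Series(pd.Categorical(list("bbbaac"), categories=list("abcd")))
assert ser.value_counts()["d"] == 0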
https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 + + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=True), + Series([2, 1], index=[True, False], name="count"), + ) + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=False), + Series([2, 1], index=[True, False], name="count"), + ) + + tm.assert_series_equal( + Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True), + Series([3, 2], index=Index([True, False], dtype=object), name="count"), + ) + tm.assert_series_equal( + Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False), + Series([5, 3, 2], index=[True, False, None], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=False), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + tm.assert_series_equal( + Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False) + expected = Series([3, 2, 1], index=[5.0, 10.3, None], name="count") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]")) + def test_value_counts_normalized(self, dtype): + # GH12558 + s = Series([1] * 2 + [2] * 3 + [np.nan] * 5) + s_typed = s.astype(dtype) + result = s_typed.value_counts(normalize=True, dropna=False) + expected = Series( + [0.5, 0.3, 0.2], + index=Series([np.nan, 2.0, 1.0], dtype=dtype), + name="proportion", + ) + tm.assert_series_equal(result, expected) + + result = s_typed.value_counts(normalize=True, dropna=True) + expected = Series( + [0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype), name="proportion" + ) + tm.assert_series_equal(result, expected) + + def test_value_counts_uint64(self): + arr = np.array([2**63], dtype=np.uint64) + expected = Series([1], index=[2**63], name="count") + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + arr = np.array([-1, 2**63], dtype=object) + expected = Series([1, 1], index=[-1, 2**63], name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + def test_value_counts_series(self): + # GH#54857 + values = np.array([3, 1, 2, 3, 4, np.nan]) + result = Series(values).value_counts(bins=3) + expected = Series( + [2, 2, 1], + index=IntervalIndex.from_tuples( + [(0.996, 2.0), (2.0, 3.0), (3.0, 4.0)], dtype="interval[float64, right]" + ), + name="count", + ) + tm.assert_series_equal(result, expected) + + +class TestDuplicated: + def test_duplicated_with_nas(self): + keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object) + + result = algos.duplicated(keys) + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="first") + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array([True, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array([True, False, True, 
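# Sketch of the keep semantics TestDuplicated exercises: "first" flags
# later repeats, "last" flags earlier ones, and keep=False flags every row
# that has a duplicate anywhere.
import pandas as pd

ser = pd.Series([0, 1, 0])
assert ser.duplicated(keep="first").tolist() == [False, False, True]
assert ser.duplicated(keep="last").tolist() == [True, False, False]
assert ser.duplicated(keep=False).tolist() == [True, False, True]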
True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + keys = np.empty(8, dtype=object) + for i, t in enumerate( + zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2) + ): + keys[i] = t + + result = algos.duplicated(keys) + falses = [False] * 4 + trues = [True] * 4 + expected = np.array(falses + trues) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array(trues + falses) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array(trues + trues) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "case", + [ + np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]), + np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), + np.array( + [ + 1 + 1j, + 2 + 2j, + 1 + 1j, + 5 + 5j, + 3 + 3j, + 2 + 2j, + 4 + 4j, + 1 + 1j, + 5 + 5j, + 6 + 6j, + ] + ), + np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object), + np.array( + [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64 + ), + ], + ) + def test_numeric_object_likes(self, case): + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [Index(case), Index(case, dtype="category")]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [Series(case), Series(case, dtype="category")]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + def test_datetime_likes(self): + dt = [ + "2011-01-01", + "2011-01-02", + "2011-01-01", + "NaT", + "2011-01-03", + "2011-01-02", + "2011-01-04", + "2011-01-01", + "NaT", + "2011-01-06", + ] + td = [ + "1 days", + "2 days", + "1 days", + "NaT", + "3 days", + "2 days", + "4 days", + "1 days", + "NaT", + "6 days", + ] + + cases = [ + np.array([Timestamp(d) for d in dt]), + np.array([Timestamp(d, tz="US/Eastern") for d in dt]), + np.array([Period(d, freq="D") for d in dt]), + np.array([np.datetime64(d) for d in dt]), + np.array([Timedelta(d) for d in td]), + ] + + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + for case in cases: + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [ + 
Index(case), + Index(case, dtype="category"), + Index(case, dtype=object), + ]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [ + Series(case), + Series(case, dtype="category"), + Series(case, dtype=object), + ]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)]) + def test_unique_index(self, case): + assert case.is_unique is True + tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False])) + + @pytest.mark.parametrize( + "arr, uniques", + [ + ( + [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)], + [(0, 0), (0, 1), (1, 0), (1, 1)], + ), + ( + [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")], + [("b", "c"), ("a", "b")], + ), + ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]), + ], + ) + def test_unique_tuples(self, arr, uniques): + # https://github.com/pandas-dev/pandas/issues/16519 + expected = np.empty(len(uniques), dtype=object) + expected[:] = uniques + + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arr) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "array,expected", + [ + ( + [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j], + # Should return a complex dtype in the future + np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object), + ) + ], + ) + def test_unique_complex_numbers(self, array, expected): + # GH 17927 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(array) + tm.assert_numpy_array_equal(result, expected) + + +class TestHashTable: + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_unique(self, htable, data, writable): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.unique() + expected_unique = s_duplicated.drop_duplicates(keep="first").values + result_unique = htable().unique(s_duplicated.values) + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # test return_inverse=True + # reconstruction can only succeed if the inverse is correct + result_unique, result_inverse = htable().unique( + s_duplicated.values, 
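# Sketch of the return_inverse contract checked above, using the internal
# hashtable module the tests import as ht (an implementation detail, not
# public API): indexing uniques by the inverse codes reconstructs the
# original values exactly.
import numpy as np
from pandas._libs import hashtable as ht

values = np.array([3.0, 1.0, 3.0, 2.0, 1.0], dtype=np.float64)
uniques, inverse = ht.Float64HashTable().unique(values, return_inverse=True)
assert (uniques[inverse] == values).all()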
return_inverse=True + ) + tm.assert_numpy_array_equal(result_unique, expected_unique) + reconstr = result_unique[result_inverse] + tm.assert_numpy_array_equal(reconstr, s_duplicated.values) + + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_factorize(self, htable, writable, data): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + na_mask = s_duplicated.isna().values + + result_unique, result_inverse = htable().factorize(s_duplicated.values) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.factorize() + # since factorize removes all NaNs, we do the same here + expected_unique = s_duplicated.dropna().drop_duplicates().values + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # reconstruction can only succeed if the inverse is correct. Since + # factorize removes the NaNs, those have to be excluded here as well + result_reconstruct = result_unique[result_inverse[~na_mask]] + expected_reconstruct = s_duplicated.dropna().values + tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct) + + +class TestRank: + @pytest.mark.parametrize( + "arr", + [ + [np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan], + [4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan], + ], + ) + def test_scipy_compat(self, arr): + sp_stats = pytest.importorskip("scipy.stats") + + arr = np.array(arr) + + mask = ~np.isfinite(arr) + arr = arr.copy() + result = libalgos.rank_1d(arr) + arr[mask] = np.inf + exp = sp_stats.rankdata(arr) + exp[mask] = np.nan + tm.assert_almost_equal(result, exp) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_basic(self, writable, dtype): + exp = np.array([1, 2], dtype=np.float64) + + data = np.array([1, 100], dtype=dtype) + data.setflags(write=writable) + ser = Series(data) + result = algos.rank(ser) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("dtype", [np.float64, np.uint64]) + def test_uint64_overflow(self, dtype): + exp = np.array([1, 2], dtype=np.float64) + + s = Series([1, 2**63], dtype=dtype) + tm.assert_numpy_array_equal(algos.rank(s), exp) + + def test_too_many_ndims(self): + arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) + msg = "Array with ndim > 2 are not supported" + + with pytest.raises(TypeError, match=msg): + algos.rank(arr) + + @pytest.mark.single_cpu + def test_pct_max_many_rows(self): + # GH 18271 + values = np.arange(2**24 + 1) + result = algos.rank(values, pct=True).max() + assert result == 1 + + values = np.arange(2**25 + 2).reshape(2**24 + 1, 2) + result = algos.rank(values, pct=True).max() + assert result == 1 + + +class TestMode: + def test_no_mode(self): + exp = Series([], dtype=np.float64, index=Index([], dtype=int)) + tm.assert_numpy_array_equal(algos.mode(np.array([])), exp.values) + + 
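# Sketch of the ranking behavior test_scipy_compat compares against
# scipy.stats.rankdata: ties get the average rank and NaNs keep a NaN
# rank by default.
import numpy as np
import pandas as pd

result = pd.Series([1.0, np.nan, 3.0, 3.0]).rank()
assert result.iloc[0] == 1.0 and np.isnan(result.iloc[1])
assert result.iloc[2] == result.iloc[3] == 2.5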
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_mode_single(self, dt): + # GH 15714 + exp_single = [1] + data_single = [1] + + exp_multi = [1] + data_multi = [1, 1] + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mode_obj_int(self): + exp = Series([1], dtype=int) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + exp = Series(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_number_mode(self, dt): + exp_single = [1] + data_single = [1] * 5 + [2] * 3 + + exp_multi = [1, 3] + data_multi = [1] * 5 + [2] * 3 + [3] * 5 + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_strobj_mode(self): + exp = ["b"] + data = ["a"] * 2 + ["b"] * 3 + + ser = Series(data, dtype="c") + exp = Series(exp, dtype="c") + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + @pytest.mark.parametrize("dt", [str, object]) + def test_strobj_multi_char(self, dt): + exp = ["bar"] + data = ["foo"] * 2 + ["bar"] * 3 + + ser = Series(data, dtype=dt) + exp = Series(exp, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_datelike_mode(self): + exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series( + ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"], + dtype="M8[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_timedelta_mode(self): + exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]") + ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]") + ser = Series( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mixed_dtype(self): + exp = Series(["foo"], dtype=object) + ser = Series([1, "foo", "foo"]) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_uint64_overflow(self): + exp = Series([2**63], dtype=np.uint64) + ser = Series([1, 2**63, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), 
exp.values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series([1, 2**63], dtype=np.uint64) + ser = Series([1, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_categorical(self): + c = Categorical([1, 2]) + exp = c + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, "a", "a"]) + exp = Categorical(["a"], categories=[1, "a"]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, 1, 2, 3, 3]) + exp = Categorical([1, 3], categories=[1, 2, 3]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + def test_index(self): + idx = Index([1, 2, 3]) + exp = Series([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, "a", "a"]) + exp = Series(["a"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, 1, 2, 3, 3]) + exp = Series([1, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + with pytest.raises(AttributeError, match="TimedeltaIndex"): + # algos.mode expects Arraylike, does *not* unwrap TimedeltaIndex + algos.mode(idx) + + def test_ser_mode_with_name(self): + # GH 46737 + ser = Series([1, 1, 3], name="foo") + result = ser.mode() + expected = Series([1], name="foo") + tm.assert_series_equal(result, expected) + + +class TestDiff: + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_diff_datetimelike_nat(self, dtype): + # NaT - NaT is NaT, not 0 + arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4) + arr[:, 2] = arr.dtype.type("NaT", "ns") + result = algos.diff(arr, 1, axis=0) + + expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4 + expected[:, 2] = np.timedelta64("NaT", "ns") + expected[0, :] = np.timedelta64("NaT", "ns") + + tm.assert_numpy_array_equal(result, expected) + + result = algos.diff(arr.T, 1, axis=1) + tm.assert_numpy_array_equal(result, expected.T) + + def test_diff_ea_axis(self): + dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data + + msg = "cannot diff DatetimeArray on axis=1" + with pytest.raises(ValueError, match=msg): + algos.diff(dta, 1, axis=1) + + @pytest.mark.parametrize("dtype", ["int8", "int16"]) + def test_diff_low_precision_int(self, dtype): + arr = np.array([0, 1, 1, 0, 0], dtype=dtype) + result = algos.diff(arr, 1) + expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [np.array, pd.array]) +def test_union_with_duplicates(op): + # GH#36289 + lvals = op([3, 1, 3, 4]) + rvals = op([2, 3, 1, 1]) + expected = op([3, 3, 1, 1, 4, 2]) + if isinstance(expected, np.ndarray): + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_numpy_array_equal(result, expected) + else: + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_common.py b/venv/lib/python3.10/site-packages/pandas/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a1c961c8cb6e5b1014f6baa193d4593d85d981 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_common.py @@ -0,0 +1,267 @@ +import collections +from functools import partial +import string 
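# Sketch of the upcasting rule test_diff_low_precision_int pins down,
# using the internal pandas.core.algorithms module the tests import as
# algos: diff on int8/int16 returns float32 so the leading NaN fits.
import numpy as np
import pandas.core.algorithms as algos

result = algos.diff(np.array([0, 1, 1, 0], dtype="int8"), 1)
assert result.dtype == np.float32
assert np.isnan(result[0]) and result.tolist()[1:] == [1.0, 0.0, -1.0]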
+import subprocess +import sys +import textwrap + +import numpy as np +import pytest + +import pandas as pd +from pandas import Series +import pandas._testing as tm +from pandas.core import ops +import pandas.core.common as com +from pandas.util.version import Version + + +def test_get_callable_name(): + getname = com.get_callable_name + + def fn(x): + return x + + lambda_ = lambda x: x + part1 = partial(fn) + part2 = partial(part1) + + class somecall: + def __call__(self): + # This shouldn't actually get called below; somecall.__init__ + # should. + raise NotImplementedError + + assert getname(fn) == "fn" + assert getname(lambda_) + assert getname(part1) == "fn" + assert getname(part2) == "fn" + assert getname(somecall()) == "somecall" + assert getname(1) is None + + +def test_any_none(): + assert com.any_none(1, 2, 3, None) + assert not com.any_none(1, 2, 3, 4) + + +def test_all_not_none(): + assert com.all_not_none(1, 2, 3, 4) + assert not com.all_not_none(1, 2, 3, None) + assert not com.all_not_none(None, None, None, None) + + +def test_random_state(): + # Check with seed + state = com.random_state(5) + assert state.uniform() == np.random.RandomState(5).uniform() + + # Check with random state object + state2 = np.random.RandomState(10) + assert com.random_state(state2).uniform() == np.random.RandomState(10).uniform() + + # check with no arg random state + assert com.random_state() is np.random + + # check array-like + # GH32503 + state_arr_like = np.random.default_rng(None).integers( + 0, 2**31, size=624, dtype="uint32" + ) + assert ( + com.random_state(state_arr_like).uniform() + == np.random.RandomState(state_arr_like).uniform() + ) + + # Check BitGenerators + # GH32503 + assert ( + com.random_state(np.random.MT19937(3)).uniform() + == np.random.RandomState(np.random.MT19937(3)).uniform() + ) + assert ( + com.random_state(np.random.PCG64(11)).uniform() + == np.random.RandomState(np.random.PCG64(11)).uniform() + ) + + # Error for floats or strings + msg = ( + "random_state must be an integer, array-like, a BitGenerator, Generator, " + "a numpy RandomState, or None" + ) + with pytest.raises(ValueError, match=msg): + com.random_state("test") + + with pytest.raises(ValueError, match=msg): + com.random_state(5.5) + + +@pytest.mark.parametrize( + "left, right, expected", + [ + (Series([1], name="x"), Series([2], name="x"), "x"), + (Series([1], name="x"), Series([2], name="y"), None), + (Series([1]), Series([2], name="x"), None), + (Series([1], name="x"), Series([2]), None), + (Series([1], name="x"), [2], "x"), + ([1], Series([2], name="y"), "y"), + # matching NAs + (Series([1], name=np.nan), pd.Index([], name=np.nan), np.nan), + (Series([1], name=np.nan), pd.Index([], name=pd.NaT), None), + (Series([1], name=pd.NA), pd.Index([], name=pd.NA), pd.NA), + # tuple name GH#39757 + ( + Series([1], name=np.int64(1)), + pd.Index([], name=(np.int64(1), np.int64(2))), + None, + ), + ( + Series([1], name=(np.int64(1), np.int64(2))), + pd.Index([], name=(np.int64(1), np.int64(2))), + (np.int64(1), np.int64(2)), + ), + pytest.param( + Series([1], name=(np.float64("nan"), np.int64(2))), + pd.Index([], name=(np.float64("nan"), np.int64(2))), + (np.float64("nan"), np.int64(2)), + marks=pytest.mark.xfail( + reason="Not checking for matching NAs inside tuples." 
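# Sketch of the seed normalization test_random_state covers:
# com.random_state turns an int seed into a reproducible RandomState and
# returns the np.random module when called with no argument.
import numpy as np
import pandas.core.common as com

assert com.random_state(5).uniform() == np.random.RandomState(5).uniform()
assert com.random_state() is np.random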
+ ), + ), + ], +) +def test_maybe_match_name(left, right, expected): + res = ops.common._maybe_match_name(left, right) + assert res is expected or res == expected + + +def test_standardize_mapping(): + # No uninitialized defaultdicts + msg = r"to_dict\(\) only accepts initialized defaultdicts" + with pytest.raises(TypeError, match=msg): + com.standardize_mapping(collections.defaultdict) + + # No non-mapping subtypes, instance + msg = "unsupported type: " + with pytest.raises(TypeError, match=msg): + com.standardize_mapping([]) + + # No non-mapping subtypes, class + with pytest.raises(TypeError, match=msg): + com.standardize_mapping(list) + + fill = {"bad": "data"} + assert com.standardize_mapping(fill) == dict + + # Convert instance to type + assert com.standardize_mapping({}) == dict + + dd = collections.defaultdict(list) + assert isinstance(com.standardize_mapping(dd), partial) + + +def test_git_version(): + # GH 21295 + git_version = pd.__git_version__ + assert len(git_version) == 40 + assert all(c in string.hexdigits for c in git_version) + + +def test_version_tag(): + version = Version(pd.__version__) + try: + version > Version("0.0.1") + except TypeError: + raise ValueError( + "No git tags exist, please sync tags between upstream and your repo" + ) + + +@pytest.mark.parametrize( + "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)] +) +def test_serializable(obj): + # GH 35611 + unpickled = tm.round_trip_pickle(obj) + assert type(obj) == type(unpickled) + + +class TestIsBoolIndexer: + def test_non_bool_array_with_na(self): + # in particular, this should not raise + arr = np.array(["A", "B", np.nan], dtype=object) + assert not com.is_bool_indexer(arr) + + def test_list_subclass(self): + # GH#42433 + + class MyList(list): + pass + + val = MyList(["a"]) + + assert not com.is_bool_indexer(val) + + val = MyList([True]) + assert com.is_bool_indexer(val) + + def test_frozenlist(self): + # GH#42461 + data = {"col1": [1, 2], "col2": [3, 4]} + df = pd.DataFrame(data=data) + + frozen = df.index.names[1:] + assert not com.is_bool_indexer(frozen) + + result = df[frozen] + expected = df[[]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("with_exception", [True, False]) +def test_temp_setattr(with_exception): + # GH#45954 + ser = Series(dtype=object) + ser.name = "first" + # Raise a ValueError in either case to satisfy pytest.raises + match = "Inside exception raised" if with_exception else "Outside exception raised" + with pytest.raises(ValueError, match=match): + with com.temp_setattr(ser, "name", "second"): + assert ser.name == "second" + if with_exception: + raise ValueError("Inside exception raised") + raise ValueError("Outside exception raised") + assert ser.name == "first" + + +@pytest.mark.single_cpu +def test_str_size(): + # GH#21758 + a = "a" + expected = sys.getsizeof(a) + pyexe = sys.executable.replace("\\", "/") + call = [ + pyexe, + "-c", + "a='a';import sys;sys.getsizeof(a);import pandas;print(sys.getsizeof(a));", + ] + result = subprocess.check_output(call).decode()[-4:-1].strip("\n") + assert int(result) == int(expected) + + +@pytest.mark.single_cpu +def test_bz2_missing_import(): + # Check whether bz2 missing import is handled correctly (issue #53857) + code = """ + import sys + sys.modules['bz2'] = None + import pytest + import pandas as pd + from pandas.compat import get_bz2_file + msg = 'bz2 module not available.' 
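# Sketch of the restore guarantee test_temp_setattr relies on: the
# attribute is swapped inside the block and restored afterwards, whether
# the block exits normally or (as the test forces) via an exception.
import pandas as pd
import pandas.core.common as com

ser = pd.Series(dtype=object)
ser.name = "first"
with com.temp_setattr(ser, "name", "second"):
    assert ser.name == "second"
assert ser.name == "first"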
+ with pytest.raises(RuntimeError, match=msg): + get_bz2_file() + """ + code = textwrap.dedent(code) + call = [sys.executable, "-c", code] + subprocess.check_output(call) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_downstream.py b/venv/lib/python3.10/site-packages/pandas/tests/test_downstream.py new file mode 100644 index 0000000000000000000000000000000000000000..51ce73ef54300c5d2f9be4e3988de77843a61a8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_downstream.py @@ -0,0 +1,362 @@ +""" +Testing that we work in the downstream packages +""" +import array +import subprocess +import sys + +import numpy as np +import pytest + +from pandas.errors import IntCastingNaNError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +@pytest.fixture +def df(): + return DataFrame({"A": [1, 2, 3]}) + + +def test_dask(df): + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + pytest.importorskip("toolz") + dd = pytest.importorskip("dask.dataframe") + + ddf = dd.from_pandas(df, npartitions=3) + assert ddf.A is not None + assert ddf.compute() is not None + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_dask_ufunc(): + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + da = pytest.importorskip("dask.array") + dd = pytest.importorskip("dask.dataframe") + + s = Series([1.5, 2.3, 3.7, 4.0]) + ds = dd.from_pandas(s, npartitions=2) + + result = da.fix(ds).compute() + expected = np.fix(s) + tm.assert_series_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_construct_dask_float_array_int_dtype_match_ndarray(): + # GH#40110 make sure we treat a float-dtype dask array with the same + # rules we would for an ndarray + dd = pytest.importorskip("dask.dataframe") + + arr = np.array([1, 2.5, 3]) + darr = dd.from_array(arr) + + res = Series(darr) + expected = Series(arr) + tm.assert_series_equal(res, expected) + + # GH#49599 in 2.0 we raise instead of silently ignoring the dtype + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Series(darr, dtype="i8") + + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + arr[2] = np.nan + with pytest.raises(IntCastingNaNError, match=msg): + Series(darr, dtype="i8") + # which is the same as we get with a numpy input + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr, dtype="i8") + + +def test_xarray(df): + pytest.importorskip("xarray") + + assert df.to_xarray() is not None + + +def test_xarray_cftimeindex_nearest(): + # https://github.com/pydata/xarray/issues/3751 + cftime = pytest.importorskip("cftime") + xarray = pytest.importorskip("xarray") + + times = xarray.cftime_range("0001", periods=2) + key = cftime.DatetimeGregorian(2000, 1, 1) + result = times.get_indexer([key], method="nearest") + expected = 1 + assert result == expected + + +@pytest.mark.single_cpu +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) + + +@pytest.mark.single_cpu +def 
test_oo_optimized_datetime_index_unpickle(): + # GH 42866 + subprocess.check_call( + [ + sys.executable, + "-OO", + "-c", + ( + "import pandas as pd, pickle; " + "pickle.loads(pickle.dumps(pd.date_range('2021-01-01', periods=1)))" + ), + ] + ) + + +def test_statsmodels(): + smf = pytest.importorskip("statsmodels.formula.api") + + df = DataFrame( + {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)} + ) + smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit() + + +def test_scikit_learn(): + pytest.importorskip("sklearn") + from sklearn import ( + datasets, + svm, + ) + + digits = datasets.load_digits() + clf = svm.SVC(gamma=0.001, C=100.0) + clf.fit(digits.data[:-1], digits.target[:-1]) + clf.predict(digits.data[-1:]) + + +def test_seaborn(): + seaborn = pytest.importorskip("seaborn") + tips = DataFrame( + {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)} + ) + seaborn.stripplot(x="day", y="total_bill", data=tips) + + +def test_pandas_datareader(): + pytest.importorskip("pandas_datareader") + + +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") +def test_pyarrow(df): + pyarrow = pytest.importorskip("pyarrow") + table = pyarrow.Table.from_pandas(df) + result = table.to_pandas() + tm.assert_frame_equal(result, df) + + +def test_yaml_dump(df): + # GH#42748 + yaml = pytest.importorskip("yaml") + + dumped = yaml.dump(df) + + loaded = yaml.load(dumped, Loader=yaml.Loader) + tm.assert_frame_equal(df, loaded) + + loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader) + tm.assert_frame_equal(df, loaded2) + + +@pytest.mark.single_cpu +def test_missing_required_dependency(): + # GH 23868 + # To ensure proper isolation, we pass these flags + # -S : disable site-packages + # -s : disable user site-packages + # -E : disable PYTHON* env vars, especially PYTHONPATH + # https://github.com/MacPython/pandas-wheels/pull/50 + + pyexe = sys.executable.replace("\\", "/") + + # We skip this test if pandas is installed as a site package. We first + # import the package normally and check the path to the module before + # executing the test which imports pandas with site packages disabled. + call = [pyexe, "-c", "import pandas;print(pandas.__file__)"] + output = subprocess.check_output(call).decode() + if "site-packages" in output: + pytest.skip("pandas installed as site package") + + # This test will fail if pandas is installed as a site package. The flags + # prevent pandas being imported and the test will report Failed: DID NOT + # RAISE + call = [pyexe, "-sSE", "-c", "import pandas"] + + msg = ( + rf"Command '\['{pyexe}', '-sSE', '-c', 'import pandas'\]' " + "returned non-zero exit status 1." 
+ ) + + with pytest.raises(subprocess.CalledProcessError, match=msg) as exc: + subprocess.check_output(call, stderr=subprocess.STDOUT) + + output = exc.value.stdout.decode() + for name in ["numpy", "pytz", "dateutil"]: + assert name in output + + +def test_frame_setitem_dask_array_into_new_col(): + # GH#47128 + + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + da = pytest.importorskip("dask.array") + + dda = da.array([1, 2]) + df = DataFrame({"a": ["a", "b"]}) + df["b"] = dda + df["c"] = dda + df.loc[[False, True], "b"] = 100 + result = df.loc[[1], :] + expected = DataFrame({"a": ["b"], "b": [100], "c": [2]}, index=[1]) + tm.assert_frame_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_pandas_priority(): + # GH#48347 + + class MyClass: + __pandas_priority__ = 5000 + + def __radd__(self, other): + return self + + left = MyClass() + right = Series(range(3)) + + assert right.__add__(left) is NotImplemented + assert right + left is left + + +@pytest.fixture( + params=[ + "memoryview", + "array", + pytest.param("dask", marks=td.skip_if_no("dask.array")), + pytest.param("xarray", marks=td.skip_if_no("xarray")), + ] +) +def array_likes(request): + """ + Fixture giving a numpy array and a parametrized 'data' object, which can + be a memoryview, array, dask or xarray object created from the numpy array. + """ + # GH#24539 recognize e.g xarray, dask, ... + arr = np.array([1, 2, 3], dtype=np.int64) + + name = request.param + if name == "memoryview": + data = memoryview(arr) + elif name == "array": + data = array.array("i", arr) + elif name == "dask": + import dask.array + + data = dask.array.array(arr) + elif name == "xarray": + import xarray as xr + + data = xr.DataArray(arr) + + return arr, data + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_obscure_array(dtype, array_likes): + # GH#24539 recognize e.g xarray, dask, ... + # Note: we dont do this for PeriodArray bc _from_sequence won't accept + # an array of integers + # TODO: could check with arraylike of Period objects + arr, data = array_likes + + cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] + + depr_msg = f"{cls.__name__}.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected = cls(arr) + result = cls._from_sequence(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + if not isinstance(data, memoryview): + # FIXME(GH#44431) these raise on memoryview and attempted fix + # fails on py3.10 + func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) + + # Let's check the Indexes while we're here + idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] + result = idx_cls(arr) + expected = idx_cls(data) + tm.assert_index_equal(result, expected) + + +def test_dataframe_consortium() -> None: + """ + Test some basic methods of the dataframe consortium standard. + + Full testing is done at https://github.com/data-apis/dataframe-api-compat, + this is just to check that the entry point works as expected. 
+ """ + pytest.importorskip("dataframe_api_compat") + df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = df_pd.__dataframe_consortium_standard__() + result_1 = df.get_column_names() + expected_1 = ["a", "b"] + assert result_1 == expected_1 + + ser = Series([1, 2, 3], name="a") + col = ser.__column_consortium_standard__() + assert col.name == "a" + + +def test_xarray_coerce_unit(): + # GH44053 + xr = pytest.importorskip("xarray") + + arr = xr.DataArray([1, 2, 3]) + result = pd.to_datetime(arr, unit="ns") + expected = DatetimeIndex( + [ + "1970-01-01 00:00:00.000000001", + "1970-01-01 00:00:00.000000002", + "1970-01-01 00:00:00.000000003", + ], + dtype="datetime64[ns]", + freq=None, + ) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_errors.py b/venv/lib/python3.10/site-packages/pandas/tests/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..aeddc08e4b888c0937a3095a46003613e0115876 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_errors.py @@ -0,0 +1,112 @@ +import pytest + +from pandas.errors import ( + AbstractMethodError, + UndefinedVariableError, +) + +import pandas as pd + + +@pytest.mark.parametrize( + "exc", + [ + "AttributeConflictWarning", + "CSSWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "DataError", + "DatabaseError", + "DtypeWarning", + "EmptyDataError", + "IncompatibilityWarning", + "IndexingError", + "InvalidColumnName", + "InvalidComparison", + "InvalidVersion", + "LossySetitemError", + "MergeError", + "NoBufferPresent", + "NumExprClobberingError", + "NumbaUtilError", + "OptionError", + "OutOfBoundsDatetime", + "ParserError", + "ParserWarning", + "PerformanceWarning", + "PossibleDataLossError", + "PossiblePrecisionLoss", + "PyperclipException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UnsortedIndexError", + "UnsupportedFunctionCall", + "ValueLabelTypeMismatch", + ], +) +def test_exception_importable(exc): + from pandas import errors + + err = getattr(errors, exc) + assert err is not None + + # check that we can raise on them + + msg = "^$" + + with pytest.raises(err, match=msg): + raise err() + + +def test_catch_oob(): + from pandas import errors + + msg = "Cannot cast 1500-01-01 00:00:00 to unit='ns' without overflow" + with pytest.raises(errors.OutOfBoundsDatetime, match=msg): + pd.Timestamp("15000101").as_unit("ns") + + +@pytest.mark.parametrize( + "is_local", + [ + True, + False, + ], +) +def test_catch_undefined_variable_error(is_local): + variable_name = "x" + if is_local: + msg = f"local variable '{variable_name}' is not defined" + else: + msg = f"name '{variable_name}' is not defined" + + with pytest.raises(UndefinedVariableError, match=msg): + raise UndefinedVariableError(variable_name, is_local) + + +class Foo: + @classmethod + def classmethod(cls): + raise AbstractMethodError(cls, methodtype="classmethod") + + @property + def property(self): + raise AbstractMethodError(self, methodtype="property") + + def method(self): + raise AbstractMethodError(self) + + +def test_AbstractMethodError_classmethod(): + xpr = "This classmethod must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo.classmethod() + + xpr = "This property must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo().property + + xpr = "This method must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, 
match=xpr): + Foo().method() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_expressions.py b/venv/lib/python3.10/site-packages/pandas/tests/test_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..dfec99f0786ebf11a44dedfad8aa8e1015356fab --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_expressions.py @@ -0,0 +1,466 @@ +import operator +import re + +import numpy as np +import pytest + +from pandas import option_context +import pandas._testing as tm +from pandas.core.api import ( + DataFrame, + Index, + Series, +) +from pandas.core.computation import expressions as expr + + +@pytest.fixture +def _frame(): + return DataFrame( + np.random.default_rng(2).standard_normal((10001, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _frame2(): + return DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _mixed(_frame): + return DataFrame( + { + "A": _frame["A"].copy(), + "B": _frame["B"].astype("float32"), + "C": _frame["C"].astype("int64"), + "D": _frame["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _mixed2(_frame2): + return DataFrame( + { + "A": _frame2["A"].copy(), + "B": _frame2["B"].astype("float32"), + "C": _frame2["C"].astype("int64"), + "D": _frame2["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _integer(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(10001, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _integer_integers(_integer): + # integers to get a case with zeros + return _integer * np.random.default_rng(2).integers(0, 2, size=np.shape(_integer)) + + +@pytest.fixture +def _integer2(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(101, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _array(_frame): + return _frame["A"].values.copy() + + +@pytest.fixture +def _array2(_frame2): + return _frame2["A"].values.copy() + + +@pytest.fixture +def _array_mixed(_mixed): + return _mixed["D"].values.copy() + + +@pytest.fixture +def _array_mixed2(_mixed2): + return _mixed2["D"].values.copy() + + +@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") +class TestExpressions: + @staticmethod + def call_op(df, other, flex: bool, opname: str): + if flex: + op = lambda x, y: getattr(x, opname)(y) + op.__name__ = opname + else: + op = getattr(operator, opname) + + with option_context("compute.use_numexpr", False): + expected = op(df, other) + + expr.get_test_result() + + result = op(df, other) + return result, expected + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + @pytest.mark.parametrize( + "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"] + ) + def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch): + df = request.getfixturevalue(fixture) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + result, expected = self.call_op(df, df, flex, arith) + + if arith == "truediv": + assert all(x.kind == "f" for x in expected.dtypes.values) + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + result, expected = self.call_op( + df.iloc[:, i], df.iloc[:, i], flex, arith + ) + if arith == "truediv": + assert expected.dtype.kind == "f" + tm.assert_equal(expected, 
result) + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch): + """ + tests solely that the result is the same whether or not numexpr is + enabled. Need to test whether the function does the correct thing + elsewhere. + """ + df = request.getfixturevalue(fixture) + arith = comparison_op.__name__ + with option_context("compute.use_numexpr", False): + other = df.copy() + 1 + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + expr.set_test_mode(True) + + result, expected = self.call_op(df, other, flex, arith) + + used_numexpr = expr.get_test_result() + assert used_numexpr, "Did not use numexpr as expected." + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + binary_comp = other.iloc[:, i] + 1 + self.call_op(df.iloc[:, i], binary_comp, flex, "add") + + def test_invalid(self): + array = np.random.default_rng(2).standard_normal(1_000_001) + array2 = np.random.default_rng(2).standard_normal(100) + + # no op + result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate") + assert not result + + # min elements + result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate") + assert not result + + # ok, we only check on first part of expression + result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate") + assert result + + @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning") + @pytest.mark.parametrize( + "opname,op_str", + [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], + ) + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_binary_ops(self, request, opname, op_str, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(left, right, opname, op_str): + if opname == "pow": + left = np.abs(left) + + op = getattr(operator, opname) + + # array has 0s + result = expr.evaluate(op, left, left, use_numexpr=True) + expected = expr.evaluate(op, left, left, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit(left, right, opname, op_str) + + expr.set_numexpr_threads(1) + testit(left, right, opname, op_str) + expr.set_numexpr_threads() + testit(left, right, opname, op_str) + + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_comparison_ops(self, request, comparison_op, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(): + f12 = left + 1 + f22 = right + 1 + + op = comparison_op + + result = expr.evaluate(op, left, f12, use_numexpr=True) + expected = expr.evaluate(op, left, f12, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op, right, f22, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize("cond", [True, False]) + @pytest.mark.parametrize("fixture", ["_frame", 
"_frame2", "_mixed", "_mixed2"]) + def test_where(self, request, cond, fixture): + df = request.getfixturevalue(fixture) + + def testit(): + c = np.empty(df.shape, dtype=np.bool_) + c.fill(cond) + result = expr.where(c, df.values, df.values + 1) + expected = np.where(c, df.values, df.values + 1) + tm.assert_numpy_array_equal(result, expected) + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize( + "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")] + ) + def test_bool_ops_raise_on_arithmetic(self, op_str, opname): + df = DataFrame( + { + "a": np.random.default_rng(2).random(10) > 0.5, + "b": np.random.default_rng(2).random(10) > 0.5, + } + ) + + msg = f"operator '{opname}' not implemented for bool dtypes" + f = getattr(operator, opname) + err_msg = re.escape(msg) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, df.b) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, True) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df.a) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, True) + + @pytest.mark.parametrize( + "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")] + ) + def test_bool_ops_warn_on_arithmetic(self, op_str, opname): + n = 10 + df = DataFrame( + { + "a": np.random.default_rng(2).random(n) > 0.5, + "b": np.random.default_rng(2).random(n) > 0.5, + } + ) + + subs = {"+": "|", "*": "&", "-": "^"} + sub_funcs = {"|": "or_", "&": "and_", "^": "xor"} + + f = getattr(operator, opname) + fe = getattr(operator, sub_funcs[subs[op_str]]) + + if op_str == "-": + # raises TypeError + return + + with tm.use_numexpr(True, min_elements=5): + with tm.assert_produces_warning(): + r = f(df, df) + e = fe(df, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, df.b) + e = fe(df.a, df.b) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, True) + e = fe(df.a, True) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df.a) + e = fe(False, df.a) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df) + e = fe(False, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df, True) + e = fe(df, True) + tm.assert_frame_equal(r, e) + + @pytest.mark.parametrize( + "test_input,expected", + [ + ( + DataFrame( + [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"] + ), + DataFrame([[False, False], [False, False]], columns=["a", "dtype"]), + ), + ( + DataFrame( + [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]], + columns=["a", "b", "c", "dtype"], + ), + DataFrame( + [[False, False], [False, False], [False, False]], + columns=["a", "dtype"], + ), + ), + ], + ) + def test_bool_ops_column_name_dtype(self, test_input, expected): + # GH 22383 - .ne fails if columns containing column name 'dtype' + result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith, _frame, monkeypatch): + # GH#26736 Dataframe.floordiv(Series, 
axis=1) fails
+
+        df = _frame
+        if axis == 1:
+            other = df.iloc[0, :]
+        else:
+            other = df.iloc[:, 0]
+
+        with monkeypatch.context() as m:
+            m.setattr(expr, "_MIN_ELEMENTS", 0)
+
+            op_func = getattr(df, arith)
+
+            with option_context("compute.use_numexpr", False):
+                expected = op_func(other, axis=axis)
+
+            result = op_func(other, axis=axis)
+            tm.assert_frame_equal(expected, result)
+
+    @pytest.mark.parametrize(
+        "op",
+        [
+            "__mod__",
+            "__rmod__",
+            "__floordiv__",
+            "__rfloordiv__",
+        ],
+    )
+    @pytest.mark.parametrize("box", [DataFrame, Series, Index])
+    @pytest.mark.parametrize("scalar", [-5, 5])
+    def test_python_semantics_with_numexpr_installed(
+        self, op, box, scalar, monkeypatch
+    ):
+        # https://github.com/pandas-dev/pandas/issues/36047
+        with monkeypatch.context() as m:
+            m.setattr(expr, "_MIN_ELEMENTS", 0)
+            data = np.arange(-50, 50)
+            obj = box(data)
+            method = getattr(obj, op)
+            result = method(scalar)
+
+            # compare result with numpy
+            with option_context("compute.use_numexpr", False):
+                expected = method(scalar)
+
+            tm.assert_equal(result, expected)
+
+            # compare result element-wise with Python
+            for i, elem in enumerate(data):
+                if box == DataFrame:
+                    scalar_result = result.iloc[i, 0]
+                else:
+                    scalar_result = result[i]
+                try:
+                    expected = getattr(int(elem), op)(scalar)
+                except ZeroDivisionError:
+                    pass
+                else:
+                    assert scalar_result == expected
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_flags.py b/venv/lib/python3.10/site-packages/pandas/tests/test_flags.py
new file mode 100644
index 0000000000000000000000000000000000000000..9294b3fc3319b78b59d5637acdf3fd75737cd836
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/test_flags.py
@@ -0,0 +1,48 @@
+import pytest
+
+import pandas as pd
+
+
+class TestFlags:
+    def test_equality(self):
+        a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags
+        b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags
+
+        assert a == a
+        assert b == b
+        assert a != b
+        assert a != 2
+
+    def test_set(self):
+        df = pd.DataFrame().set_flags(allows_duplicate_labels=True)
+        a = df.flags
+        a.allows_duplicate_labels = False
+        assert a.allows_duplicate_labels is False
+        a["allows_duplicate_labels"] = True
+        assert a.allows_duplicate_labels is True
+
+    def test_repr(self):
+        a = repr(pd.DataFrame({"A": [1]}).set_flags(allows_duplicate_labels=True).flags)
+        assert a == "<Flags(allows_duplicate_labels=True)>"
+        a = repr(pd.DataFrame({"A": [1]}).set_flags(allows_duplicate_labels=False).flags)
+        assert a == "<Flags(allows_duplicate_labels=False)>"
+
+    def test_obj_ref(self):
+        df = pd.DataFrame()
+        flags = df.flags
+        del df
+        with pytest.raises(ValueError, match="object has been deleted"):
+            flags.allows_duplicate_labels = True
+
+    def test_getitem(self):
+        df = pd.DataFrame()
+        flags = df.flags
+        assert flags["allows_duplicate_labels"] is True
+        flags["allows_duplicate_labels"] = False
+        assert flags["allows_duplicate_labels"] is False
+
+        with pytest.raises(KeyError, match="a"):
+            flags["a"]
+
+        with pytest.raises(ValueError, match="a"):
+            flags["a"] = 10
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_multilevel.py b/venv/lib/python3.10/site-packages/pandas/tests/test_multilevel.py
new file mode 100644
index 0000000000000000000000000000000000000000..6644ec82fab17ac9e1c1744b595c38fda17114f5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/test_multilevel.py
@@ -0,0 +1,355 @@
+import datetime
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    Series,
+)
+import pandas._testing as tm
+
+
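+# NOTE: annotation added in this patch for readability; it is not part of the
+# upstream pandas test module. Several tests below rely on
+# ``reindex(..., level=...)`` broadcasting group-level aggregates back onto a
+# finer MultiIndex. A minimal sketch of that behaviour (fixture names and
+# values here are illustrative only):
+#
+#     idx = pd.MultiIndex.from_product(
+#         [[2000, 2001], [1, 2]], names=["year", "month"]
+#     )
+#     df = pd.DataFrame({"A": [1.0, 2.0, 3.0, 4.0]}, index=idx)
+#     sums = df.groupby(level="month").sum()
+#     sums.reindex(df.index, level="month")  # one broadcast row per (year, month)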
+class TestMultiLevel: + def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data): + # axis=0 + ymd = multiindex_year_month_day_dataframe_random_data + + month_sums = ymd.groupby("month").sum() + result = month_sums.reindex(ymd.index, level=1) + expected = ymd.groupby(level="month").transform("sum") + + tm.assert_frame_equal(result, expected) + + # Series + result = month_sums["A"].reindex(ymd.index, level=1) + expected = ymd["A"].groupby(level="month").transform("sum") + tm.assert_series_equal(result, expected, check_names=False) + + # axis=1 + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = ymd.T.groupby("month", axis=1) + + month_sums = gb.sum() + result = month_sums.reindex(columns=ymd.index, level=1) + expected = ymd.groupby(level="month").transform("sum").T + tm.assert_frame_equal(result, expected) + + def test_reindex(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + expected = frame.iloc[[0, 3]] + reindexed = frame.loc[[("foo", "one"), ("bar", "one")]] + tm.assert_frame_equal(reindexed, expected) + + def test_reindex_preserve_levels( + self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write + ): + ymd = multiindex_year_month_day_dataframe_random_data + + new_index = ymd.index[::10] + chunk = ymd.reindex(new_index) + if using_copy_on_write: + assert chunk.index.is_(new_index) + else: + assert chunk.index is new_index + + chunk = ymd.loc[new_index] + assert chunk.index.equals(new_index) + + ymdT = ymd.T + chunk = ymdT.reindex(columns=new_index) + if using_copy_on_write: + assert chunk.columns.is_(new_index) + else: + assert chunk.columns is new_index + + chunk = ymdT.loc[:, new_index] + assert chunk.columns.equals(new_index) + + def test_groupby_transform(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + s = frame["A"] + grouper = s.index.get_level_values(0) + + grouped = s.groupby(grouper, group_keys=False) + + applied = grouped.apply(lambda x: x * 2) + expected = grouped.transform(lambda x: x * 2) + result = applied.reindex(expected.index) + tm.assert_series_equal(result, expected, check_names=False) + + def test_groupby_corner(self): + midx = MultiIndex( + levels=[["foo"], ["bar"], ["baz"]], + codes=[[0], [0], [0]], + names=["one", "two", "three"], + ) + df = DataFrame( + [np.random.default_rng(2).random(4)], + columns=["a", "b", "c", "d"], + index=midx, + ) + # should work + df.groupby(level="three") + + def test_groupby_level_no_obs(self): + # #1697 + midx = MultiIndex.from_tuples( + [ + ("f1", "s1"), + ("f1", "s2"), + ("f2", "s1"), + ("f2", "s2"), + ("f3", "s1"), + ("f3", "s2"), + ] + ) + df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx) + df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])] + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df1.groupby(axis=1, level=0) + result = grouped.sum() + assert (result.columns == ["f2", "f3"]).all() + + def test_setitem_with_expansion_multiindex_columns( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + df = ymd[:5].T + df[2000, 1, 10] = df[2000, 1, 7] + assert isinstance(df.columns, MultiIndex) + assert (df[2000, 1, 10] == df[2000, 1, 7]).all() + + def test_alignment(self): + x = Series( + data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]) 
+ ) + + y = Series( + data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]) + ) + + res = x - y + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + # hit non-monotonic code path + res = x[::-1] - y[::-1] + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.groupby(level=[0, 1]).mean() + + k1 = ymd.index.get_level_values(0) + k2 = ymd.index.get_level_values(1) + + expected = ymd.groupby([k1, k2]).mean() + + # TODO groupby with level_values drops names + tm.assert_frame_equal(result, expected, check_names=False) + assert result.index.names == ymd.index.names[:2] + + result2 = ymd.groupby(level=ymd.index.names[:2]).mean() + tm.assert_frame_equal(result, result2) + + def test_multilevel_consolidate(self): + index = MultiIndex.from_tuples( + [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")] + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=index, columns=index + ) + df["Totals", ""] = df.sum(1) + df = df._consolidate() + + def test_level_with_tuples(self): + index = MultiIndex( + levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar", 0)] + result2 = series.loc[("foo", "bar", 0)] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"): + series[("foo", "bar", 0), 2] + + result = frame.loc[("foo", "bar", 0)] + result2 = frame.xs(("foo", "bar", 0)) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + index = MultiIndex( + levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar")] + result2 = series.loc[("foo", "bar")] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = frame.loc[("foo", "bar")] + result2 = frame.xs(("foo", "bar")) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.reindex(["foo", "qux"], level=0) + expected = frame.iloc[[0, 1, 2, 7, 8, 9]] + tm.assert_frame_equal(result, expected) + + result = frame.T.reindex(["foo", "qux"], axis=1, level=0) + tm.assert_frame_equal(result, expected.T) + + result = frame.loc[["foo", "qux"]] + tm.assert_frame_equal(result, expected) + + result = frame["A"].loc[["foo", "qux"]] + tm.assert_series_equal(result, expected["A"]) 
+ + result = frame.T.loc[:, ["foo", "qux"]] + tm.assert_frame_equal(result, expected.T) + + @pytest.mark.parametrize("d", [4, "d"]) + def test_empty_frame_groupby_dtypes_consistency(self, d): + # GH 20888 + group_keys = ["a", "b", "c"] + df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]}) + + g = df[df.a == 2].groupby(group_keys) + result = g.first().index + expected = MultiIndex( + levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"] + ) + + tm.assert_index_equal(result, expected) + + def test_duplicate_groupby_issues(self): + idx_tp = [ + ("600809", "20061231"), + ("600809", "20070331"), + ("600809", "20070630"), + ("600809", "20070331"), + ] + dt = ["demo", "demo", "demo", "demo"] + + idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"]) + s = Series(dt, index=idx) + + result = s.groupby(s.index).first() + assert len(result) == 3 + + def test_subsets_multiindex_dtype(self): + # GH 20757 + data = [["x", 1]] + columns = [("a", "b", np.nan), ("a", "c", 0.0)] + df = DataFrame(data, columns=MultiIndex.from_tuples(columns)) + expected = df.dtypes.a.b + result = df.a.b.dtypes + tm.assert_series_equal(result, expected) + + def test_datetime_object_multiindex(self): + data_dic = { + (0, datetime.date(2018, 3, 3)): {"A": 1, "B": 10}, + (0, datetime.date(2018, 3, 4)): {"A": 2, "B": 11}, + (1, datetime.date(2018, 3, 3)): {"A": 3, "B": 12}, + (1, datetime.date(2018, 3, 4)): {"A": 4, "B": 13}, + } + result = DataFrame.from_dict(data_dic, orient="index") + data = {"A": [1, 2, 3, 4], "B": [10, 11, 12, 13]} + index = [ + [0, 0, 1, 1], + [ + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + ], + ] + expected = DataFrame(data=data, index=index) + + tm.assert_frame_equal(result, expected) + + def test_multiindex_with_na(self): + df = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + df.at[("A", "F"), "col_2"] = 0.0 + + expected = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ["A", "F", np.nan, 0.0], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + tm.assert_frame_equal(df, expected) + + +class TestSorted: + """everything you wanted to test about sorting""" + + def test_sort_non_lexsorted(self): + # degenerate case where we sort but don't + # have a satisfying result :< + # GH 15797 + idx = MultiIndex( + [["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]] + ) + + df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64") + assert df.index.is_monotonic_increasing is False + + sorted = df.sort_index() + assert sorted.index.is_monotonic_increasing is True + + expected = DataFrame( + {"col": [1, 4, 5, 2]}, + index=MultiIndex.from_tuples( + [("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")] + ), + dtype="int64", + ) + result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :] + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_nanops.py b/venv/lib/python3.10/site-packages/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000000000000000000000000000000..a50054f33f382ed913261e0cafd944c2fd86aaa3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_nanops.py @@ -0,0 +1,1274 @@ +from functools import partial + +import numpy as np +import pytest + +import 
pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + Series, + isna, +) +import pandas._testing as tm +from pandas.core import nanops + +use_bn = nanops._USE_BOTTLENECK + + +@pytest.fixture +def disable_bottleneck(monkeypatch): + with monkeypatch.context() as m: + m.setattr(nanops, "_USE_BOTTLENECK", False) + yield + + +@pytest.fixture +def arr_shape(): + return 11, 7 + + +@pytest.fixture +def arr_float(arr_shape): + return np.random.default_rng(2).standard_normal(arr_shape) + + +@pytest.fixture +def arr_complex(arr_float): + return arr_float + arr_float * 1j + + +@pytest.fixture +def arr_int(arr_shape): + return np.random.default_rng(2).integers(-10, 10, arr_shape) + + +@pytest.fixture +def arr_bool(arr_shape): + return np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + + +@pytest.fixture +def arr_str(arr_float): + return np.abs(arr_float).astype("S") + + +@pytest.fixture +def arr_utf(arr_float): + return np.abs(arr_float).astype("U") + + +@pytest.fixture +def arr_date(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + + +@pytest.fixture +def arr_tdelta(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + + +@pytest.fixture +def arr_nan(arr_shape): + return np.tile(np.nan, arr_shape) + + +@pytest.fixture +def arr_float_nan(arr_float, arr_nan): + return np.vstack([arr_float, arr_nan]) + + +@pytest.fixture +def arr_nan_float1(arr_nan, arr_float): + return np.vstack([arr_nan, arr_float]) + + +@pytest.fixture +def arr_nan_nan(arr_nan): + return np.vstack([arr_nan, arr_nan]) + + +@pytest.fixture +def arr_inf(arr_float): + return arr_float * np.inf + + +@pytest.fixture +def arr_float_inf(arr_float, arr_inf): + return np.vstack([arr_float, arr_inf]) + + +@pytest.fixture +def arr_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_inf]) + + +@pytest.fixture +def arr_float_nan_inf(arr_float, arr_nan, arr_inf): + return np.vstack([arr_float, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_nan_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_obj( + arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta +): + return np.vstack( + [ + arr_float.astype("O"), + arr_int.astype("O"), + arr_bool.astype("O"), + arr_complex.astype("O"), + arr_str.astype("O"), + arr_utf.astype("O"), + arr_date.astype("O"), + arr_tdelta.astype("O"), + ] + ) + + +@pytest.fixture +def arr_nan_nanj(arr_nan): + with np.errstate(invalid="ignore"): + return arr_nan + arr_nan * 1j + + +@pytest.fixture +def arr_complex_nan(arr_complex, arr_nan_nanj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_nanj]) + + +@pytest.fixture +def arr_nan_infj(arr_inf): + with np.errstate(invalid="ignore"): + return arr_inf * 1j + + +@pytest.fixture +def arr_complex_nan_infj(arr_complex, arr_nan_infj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_infj]) + + +@pytest.fixture +def arr_float_1d(arr_float): + return arr_float[:, 0] + + +@pytest.fixture +def arr_nan_1d(arr_nan): + return arr_nan[:, 0] + + +@pytest.fixture +def arr_float_nan_1d(arr_float_nan): + return arr_float_nan[:, 0] + + +@pytest.fixture +def arr_float1_nan_1d(arr_float1_nan): + return arr_float1_nan[:, 0] + + +@pytest.fixture +def arr_nan_float1_1d(arr_nan_float1): + return arr_nan_float1[:, 0] + + +class TestnanopsDataFrame: + def setup_method(self): + 
nanops._USE_BOTTLENECK = False + + arr_shape = (11, 7) + + self.arr_float = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_complex = self.arr_float + self.arr_float1 * 1j + self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape) + self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype("S") + self.arr_utf = np.abs(self.arr_float).astype("U") + self.arr_date = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + ) + self.arr_tdelta = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + ) + + self.arr_nan = np.tile(np.nan, arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float * np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + + self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) + self.arr_obj = np.vstack( + [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + self.arr_complex.astype("O"), + self.arr_str.astype("O"), + self.arr_utf.astype("O"), + self.arr_date.astype("O"), + self.arr_tdelta.astype("O"), + ] + ) + + with np.errstate(invalid="ignore"): + self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf * 1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float + self.arr_float1_2d = self.arr_float1 + + self.arr_nan_2d = self.arr_nan + self.arr_float_nan_2d = self.arr_float_nan + self.arr_float1_nan_2d = self.arr_float1_nan + self.arr_nan_float1_2d = self.arr_nan_float1 + + self.arr_float_1d = self.arr_float[:, 0] + self.arr_float1_1d = self.arr_float1[:, 0] + + self.arr_nan_1d = self.arr_nan[:, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0] + + def teardown_method(self): + nanops._USE_BOTTLENECK = use_bn + + def check_results(self, targ, res, axis, check_dtype=True): + res = getattr(res, "asm8", res) + + if ( + axis != 0 + and hasattr(targ, "shape") + and targ.ndim + and targ.shape != res.shape + ): + res = np.split(res, [targ.shape[0]], axis=0)[0] + + try: + tm.assert_almost_equal(targ, res, check_dtype=check_dtype) + except AssertionError: + # handle timedelta dtypes + if hasattr(targ, "dtype") and targ.dtype == "m8[ns]": + raise + + # There are sometimes rounding errors with + # complex and object dtypes. + # If it isn't one of those, re-raise the error. 
+ if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]: + raise + # convert object dtypes to something that can be split into + # real and imaginary parts + if res.dtype.kind == "O": + if targ.dtype.kind != "O": + res = res.astype(targ.dtype) + else: + cast_dtype = "c16" if hasattr(np, "complex128") else "f8" + res = res.astype(cast_dtype) + targ = targ.astype(cast_dtype) + # there should never be a case where numpy returns an object + # but nanops doesn't, so make that an exception + elif targ.dtype.kind == "O": + raise + tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype) + tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype) + + def check_fun_data( + self, + testfunc, + targfunc, + testarval, + targarval, + skipna, + check_dtype=True, + empty_targfunc=None, + **kwargs, + ): + for axis in list(range(targarval.ndim)) + [None]: + targartempval = targarval if skipna else testarval + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: + targ = targfunc(targartempval, axis=axis, **kwargs) + + if targartempval.dtype == object and ( + targfunc is np.any or targfunc is np.all + ): + # GH#12863 the numpy functions will retain e.g. floatiness + if isinstance(targ, np.ndarray): + targ = targ.astype(bool) + else: + targ = bool(targ) + + res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + + if ( + isinstance(targ, np.complex128) + and isinstance(res, float) + and np.isnan(targ) + and np.isnan(res) + ): + # GH#18463 + targ = res + + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna: + res = testfunc(testarval, axis=axis, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if axis is None: + res = testfunc(testarval, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna and axis is None: + res = testfunc(testarval, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + + if testarval.ndim <= 1: + return + + # Recurse on lower-dimension + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + self.check_fun_data( + testfunc, + targfunc, + testarval2, + targarval2, + skipna=skipna, + check_dtype=check_dtype, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_fun( + self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs + ): + targar = testar + if testar.endswith("_nan") and hasattr(self, testar[:-4]): + targar = testar[:-4] + + testarval = getattr(self, testar) + targarval = getattr(self, targar) + self.check_fun_data( + testfunc, + targfunc, + testarval, + targarval, + skipna=skipna, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_funs( + self, + testfunc, + targfunc, + skipna, + allow_complex=True, + allow_all_nan=True, + allow_date=True, + allow_tdelta=True, + allow_obj=True, + **kwargs, + ): + self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs) + objs = [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + ] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs) + + if allow_complex: + self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs) + self.check_fun(testfunc, 
targfunc, "arr_complex_nan", skipna, **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs) + objs += [self.arr_complex.astype("O")] + + if allow_date: + targfunc(self.arr_date) + self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs) + objs += [self.arr_date.astype("O")] + + if allow_tdelta: + try: + targfunc(self.arr_tdelta) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs) + objs += [self.arr_tdelta.astype("O")] + + if allow_obj: + self.arr_obj = np.vstack(objs) + # some nanops handle object dtypes better than their numpy + # counterparts, so the numpy functions need to be given something + # else + if allow_obj == "convert": + targfunc = partial( + self._badobj_wrap, func=targfunc, allow_complex=allow_complex + ) + self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs) + + def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): + if value.dtype.kind == "O": + if allow_complex: + value = value.astype("c16") + else: + value = value.astype("f8") + return func(value, **kwargs) + + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)] + ) + def test_nan_funcs(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False) + + def test_nansum(self, skipna): + self.check_funs( + nanops.nansum, + np.sum, + skipna, + allow_date=False, + check_dtype=False, + empty_targfunc=np.nansum, + ) + + def test_nanmean(self, skipna): + self.check_funs( + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanmedian(self, skipna): + self.check_funs( + nanops.nanmedian, + np.median, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanvar(self, ddof, skipna): + self.check_funs( + nanops.nanvar, + np.var, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd(self, ddof, skipna): + self.check_funs( + nanops.nanstd, + np.std, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nansem(self, ddof, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nansem, + sp_stats.sem, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)] + ) + def test_nanops_with_warnings(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isna(nans) + if res.ndim: + res[nullnan] = -1 + elif ( + hasattr(nullnan, "all") + and nullnan.all() + or not hasattr(nullnan, "all") + and nullnan + ): + res = -1 + return res + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmax(self, skipna): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmin(self, 
skipna): + func = partial(self._argminmax_wrap, func=np.argmin) + self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not isinstance(values.dtype.type, np.floating): + values = values.astype("f8") + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0.0 + return result + + def test_nanskew(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func = partial(self._skew_kurt_wrap, func=sp_stats.skew) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nanskew, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nankurt(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func1 = partial(sp_stats.kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nankurt, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nanprod(self, skipna): + self.check_funs( + nanops.nanprod, + np.prod, + skipna, + allow_date=False, + allow_tdelta=False, + empty_targfunc=np.nanprod, + ) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) + res01 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs) + res11 = checkfun( + self.arr_float_nan_2d, + self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs) + res24 = checkfun( + self.arr_float_nan_2d, + self.arr_nan_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs) + res01 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs) + res11 = checkfun( + self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs) + res21 = 
checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs) + res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs) + res24 = checkfun( + self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson") + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_kendall(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall") + targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall") + + def test_nancorr_spearman(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman") + targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman") + + def test_invalid_method(self): + pytest.importorskip("scipy") + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'" + with pytest.raises(ValueError, match=msg): + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo") + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + 
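+# NOTE: annotation added in this patch, not part of the upstream pandas module.
+# The module-level tests below repeatedly strip the trailing axis off each
+# fixture array so that ``nanops._has_infs`` is exercised on 2-D, 1-D and 0-D
+# inputs. A rough sketch of that reduction loop, assuming an all-inf input:
+#
+#     val = np.full((3, 2), np.inf)
+#     while getattr(val, "ndim", True):   # a 0-d array has ndim == 0 -> stop
+#         assert nanops._has_infs(val)
+#         val = np.take(val, 0, axis=-1)  # drop one dimension per iteration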
+@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_complex", False), + ("arr_int", False), + ("arr_bool", False), + ("arr_str", False), + ("arr_utf", False), + ("arr_complex", False), + ("arr_complex_nan", False), + ("arr_nan_nanj", False), + ("arr_nan_infj", True), + ("arr_complex_nan_infj", True), + ], +) +def test_has_infs_non_float(request, arr, correct, disable_bottleneck): + val = request.getfixturevalue(arr) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_float", False), + ("arr_nan", False), + ("arr_float_nan", False), + ("arr_nan_nan", False), + ("arr_float_inf", True), + ("arr_inf", True), + ("arr_nan_inf", True), + ("arr_float_nan_inf", True), + ("arr_nan_nan_inf", True), + ], +) +@pytest.mark.parametrize("astype", [None, "f4", "f2"]) +def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck): + val = request.getfixturevalue(arr) + if astype is not None: + val = val.astype(astype) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"] +) +def test_bn_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert nanops._bn_ok_dtype(obj.dtype, "test") + + +@pytest.mark.parametrize( + "fixture", + [ + "arr_date", + "arr_tdelta", + "arr_obj", + ], +) +def test_bn_not_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert not nanops._bn_ok_dtype(obj.dtype, "test") + + +class TestEnsureNumeric: + def test_numeric_values(self): + # Test integer + assert nanops._ensure_numeric(1) == 1 + + # Test float + assert nanops._ensure_numeric(1.1) == 1.1 + + # Test complex + assert nanops._ensure_numeric(1 + 2j) == 1 + 2j + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + assert np.allclose(nanops._ensure_numeric(values), values) + + # Test object ndarray + o_values = values.astype(object) + assert np.allclose(nanops._ensure_numeric(o_values), values) + + # Test convertible string ndarray + s_values = np.array(["1", "2", "3"], dtype=object) + msg = r"Could not convert \['1' '2' '3'\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + # Test non-convertible string ndarray + s_values = np.array(["foo", "bar", "baz"], dtype=object) + msg = r"Could not convert .* to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + def test_convertable_values(self): + with pytest.raises(TypeError, match="Could not convert string '1' to numeric"): + nanops._ensure_numeric("1") + with pytest.raises( + TypeError, match="Could not convert string '1.1' to numeric" + ): + nanops._ensure_numeric("1.1") + with pytest.raises( + TypeError, match=r"Could not convert string '1\+1j' to numeric" + ): + nanops._ensure_numeric("1+1j") + + def test_non_convertable_values(self): + msg = "Could not convert string 'foo' to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric("foo") + + # with the wrong type, python raises TypeError for us 
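        # (The pattern below is a substring of the builtin conversion error,
        # e.g. float({}) raises
        # TypeError: float() argument must be a string or a number, not 'dict',
        # and _ensure_numeric lets that propagate rather than wrapping it.)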
+ msg = "argument must be a string or a number" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric({}) + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric([]) + + +class TestNanvarFixedValues: + # xref GH10242 + # Samples from a normal distribution. + @pytest.fixture + def variance(self): + return 3.0 + + @pytest.fixture + def samples(self, variance): + return self.prng.normal(scale=variance**0.5, size=100000) + + def test_nanvar_all_finite(self, samples, variance): + actual_variance = nanops.nanvar(samples) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + def test_nanvar_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_variance = nanops.nanvar(samples_test, skipna=True) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + actual_variance = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2) + + def test_nanstd_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_std = nanops.nanstd(samples_test, skipna=True) + tm.assert_almost_equal(actual_std, variance**0.5, rtol=1e-2) + + actual_std = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2) + + def test_nanvar_axis(self, samples, variance): + # Generate some sample data. + samples_unif = self.prng.uniform(size=samples.shape[0]) + samples = np.vstack([samples, samples_unif]) + + actual_variance = nanops.nanvar(samples, axis=1) + tm.assert_almost_equal( + actual_variance, np.array([variance, 1.0 / 12]), rtol=1e-2 + ) + + def test_nanvar_ddof(self): + n = 5 + samples = self.prng.uniform(size=(10000, n + 1)) + samples[:, -1] = np.nan # Force use of our own algorithm. + + variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean() + variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean() + variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean() + + # The unbiased estimate. + var = 1.0 / 12 + tm.assert_almost_equal(variance_1, var, rtol=1e-2) + + # The underestimated variance. + tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2) + + # The overestimated variance. + tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2) + + @pytest.mark.parametrize("axis", range(2)) + @pytest.mark.parametrize("ddof", range(3)) + def test_ground_truth(self, axis, ddof): + # Test against values that were precomputed with Numpy. + samples = np.empty((4, 4)) + samples[:3, :3] = np.array( + [ + [0.97303362, 0.21869576, 0.55560287], + [0.72980153, 0.03109364, 0.99155171], + [0.09317602, 0.60078248, 0.15871292], + ] + ) + samples[3] = samples[:, 3] = np.nan + + # Actual variances along axis=0, 1 for ddof=0, 1, 2 + variance = np.array( + [ + [ + [0.13762259, 0.05619224, 0.11568816], + [0.20643388, 0.08428837, 0.17353224], + [0.41286776, 0.16857673, 0.34706449], + ], + [ + [0.09519783, 0.16435395, 0.05082054], + [0.14279674, 0.24653093, 0.07623082], + [0.28559348, 0.49306186, 0.15246163], + ], + ] + ) + + # Test nanvar. + var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(var[:3], variance[axis, ddof]) + assert np.isnan(var[3]) + + # Test nanstd. 
+ std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5) + assert np.isnan(std[3]) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd_roundoff(self, ddof): + # Regression test for GH 10242 (test data taken from GH 10489). Ensure + # that variance is stable. + data = Series(766897346 * np.ones(10)) + result = data.std(ddof=ddof) + assert result == 0.0 + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNanskewFixedValues: + # xref GH 11974 + # Test data + skewness value (computed with scipy.stats.skew) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_skew(self): + return -0.1875895205961754 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + skew = nanops.nanskew(data) + assert skew == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(left_tailed) < 0 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(right_tailed) > 0 + + def test_ground_truth(self, samples, actual_skew): + skew = nanops.nanskew(samples) + tm.assert_almost_equal(skew, actual_skew) + + def test_axis(self, samples, actual_skew): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + skew = nanops.nanskew(samples, axis=1) + tm.assert_almost_equal(skew, np.array([actual_skew, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=False) + assert np.isnan(skew) + + def test_nans_skipna(self, samples, actual_skew): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=True) + tm.assert_almost_equal(skew, actual_skew) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNankurtFixedValues: + # xref GH 11974 + # Test data + kurtosis value (computed with scipy.stats.kurtosis) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_kurt(self): + return -1.2058303433799713 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + kurt = nanops.nankurt(data) + assert kurt == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(left_tailed) < 2 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(right_tailed) < 0 + + def test_ground_truth(self, samples, actual_kurt): + kurt = nanops.nankurt(samples) + tm.assert_almost_equal(kurt, actual_kurt) + + def test_axis(self, samples, actual_kurt): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + kurt = nanops.nankurt(samples, axis=1) + tm.assert_almost_equal(kurt, np.array([actual_kurt, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=False) + assert np.isnan(kurt) + + def test_nans_skipna(self, samples, actual_kurt): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=True) + tm.assert_almost_equal(kurt, actual_kurt) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestDatetime64NaNOps: + @pytest.fixture(params=["s", 
"ms", "us", "ns"]) + def unit(self, request): + return request.param + + # Enabling mean changes the behavior of DataFrame.mean + # See https://github.com/pandas-dev/pandas/issues/24752 + def test_nanmean(self, unit): + dti = pd.date_range("2016-01-01", periods=3).as_unit(unit) + expected = dti[1] + + for obj in [dti, dti._data]: + result = nanops.nanmean(obj) + assert result == expected + + dti2 = dti.insert(1, pd.NaT) + + for obj in [dti2, dti2._data]: + result = nanops.nanmean(obj) + assert result == expected + + @pytest.mark.parametrize("constructor", ["M8", "m8"]) + def test_nanmean_skipna_false(self, constructor, unit): + dtype = f"{constructor}[{unit}]" + arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3) + + arr[-1, -1] = "NaT" + + result = nanops.nanmean(arr, skipna=False) + assert np.isnat(result) + assert result.dtype == dtype + + result = nanops.nanmean(arr, axis=0, skipna=False) + expected = np.array([4, 5, "NaT"], dtype=arr.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = nanops.nanmean(arr, axis=1, skipna=False) + expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]]) + tm.assert_numpy_array_equal(result, expected) + + +def test_use_bottleneck(): + if nanops._BOTTLENECK_INSTALLED: + with pd.option_context("use_bottleneck", True): + assert pd.get_option("use_bottleneck") + + with pd.option_context("use_bottleneck", False): + assert not pd.get_option("use_bottleneck") + + +@pytest.mark.parametrize( + "numpy_op, expected", + [ + (np.sum, 10), + (np.nansum, 10), + (np.mean, 2.5), + (np.nanmean, 2.5), + (np.median, 2.5), + (np.nanmedian, 2.5), + (np.min, 1), + (np.max, 4), + (np.nanmin, 1), + (np.nanmax, 4), + ], +) +def test_numpy_ops(numpy_op, expected): + # GH8383 + result = numpy_op(Series([1, 2, 3, 4])) + assert result == expected + + +@pytest.mark.parametrize( + "operation", + [ + nanops.nanany, + nanops.nanall, + nanops.nansum, + nanops.nanmean, + nanops.nanmedian, + nanops.nanstd, + nanops.nanvar, + nanops.nansem, + nanops.nanargmax, + nanops.nanargmin, + nanops.nanmax, + nanops.nanmin, + nanops.nanskew, + nanops.nankurt, + nanops.nanprod, + ], +) +def test_nanops_independent_of_mask_param(operation): + # GH22764 + ser = Series([1, 2, np.nan, 3, np.nan, 4]) + mask = ser.isna() + median_expected = operation(ser._values) + median_result = operation(ser._values, mask=mask) + assert median_expected == median_result + + +@pytest.mark.parametrize("min_count", [-1, 0]) +def test_check_below_min_count_negative_or_zero_min_count(min_count): + # GH35227 + result = nanops.check_below_min_count((21, 37), None, min_count) + expected_result = False + assert result == expected_result + + +@pytest.mark.parametrize( + "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] +) +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) +def test_check_below_min_count_positive_min_count(mask, min_count, expected_result): + # GH35227 + shape = (10, 10) + result = nanops.check_below_min_count(shape, mask, min_count) + assert result == expected_result + + +@td.skip_if_windows +@td.skip_if_32bit +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) +def test_check_below_min_count_large_shape(min_count, expected_result): + # GH35227 large shape used to show that the issue is fixed + shape = (2244367, 1253) + result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) + assert result == expected_result + + +@pytest.mark.parametrize("func", ["nanmean", 
"nansum"]) +def test_check_bottleneck_disallow(any_real_numpy_dtype, func): + # GH 42878 bottleneck sometimes produces unreliable results for mean and sum + assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func) + + +@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) +def test_nanmean_overflow(disable_bottleneck, val): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + + ser = Series(val, index=range(500), dtype=np.int64) + result = ser.mean() + np_result = ser.values.mean() + assert result == val + assert result == np_result + assert result.dtype == np.float64 + + +@pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], +) +@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"]) +def test_returned_dtype(disable_bottleneck, dtype, method): + if dtype is None: + pytest.skip("np.float128 not available") + + ser = Series(range(10), dtype=dtype) + result = getattr(ser, method)() + if is_integer_dtype(dtype) and method not in ["min", "max"]: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py b/venv/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..52b5f636b1254ceddf869704a6378f4ea5012b8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py @@ -0,0 +1,100 @@ +import sys +import types + +import pytest + +from pandas.compat._optional import ( + VERSIONS, + import_optional_dependency, +) + +import pandas._testing as tm + + +def test_import_optional(): + match = "Missing .*notapackage.* pip .* conda .* notapackage" + with pytest.raises(ImportError, match=match) as exc_info: + import_optional_dependency("notapackage") + # The original exception should be there as context: + assert isinstance(exc_info.value.__context__, ImportError) + + result = import_optional_dependency("notapackage", errors="ignore") + assert result is None + + +def test_xlrd_version_fallback(): + pytest.importorskip("xlrd") + import_optional_dependency("xlrd") + + +def test_bad_version(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule") + + # Test min_version parameter + result = import_optional_dependency("fakemodule", min_version="0.8") + assert result is module + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule") + assert result is module + + with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"): + import_optional_dependency("fakemodule", min_version="1.1.0") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency( + "fakemodule", errors="warn", min_version="1.1.0" + ) + assert result is None + + result = import_optional_dependency( + "fakemodule", errors="ignore", min_version="1.1.0" + ) + assert result is None + + +def test_submodule(monkeypatch): + # 
Create a fake module with a submodule + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + sub_name = "submodule" + submodule = types.ModuleType(sub_name) + setattr(module, sub_name, submodule) + sys.modules[f"{name}.{sub_name}"] = submodule + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule.submodule") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule.submodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule.submodule") + assert result is submodule + + +def test_no_version_raises(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + with pytest.raises(ImportError, match="Can't determine .* fakemodule"): + import_optional_dependency(name) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py b/venv/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..4e569dc40005d5883870b82f46859d5bc36578f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py @@ -0,0 +1,103 @@ +from collections.abc import Generator +import contextlib + +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import accessor + + +def test_dirname_mixin() -> None: + # GH37173 + + class X(accessor.DirNamesMixin): + x = 1 + y: int + + def __init__(self) -> None: + self.z = 3 + + result = [attr_name for attr_name in dir(X()) if not attr_name.startswith("_")] + + assert result == ["x", "z"] + + +@contextlib.contextmanager +def ensure_removed(obj, attr) -> Generator[None, None, None]: + """Ensure that an attribute added to 'obj' during the test is + removed when we're done + """ + try: + yield + finally: + try: + delattr(obj, attr) + except AttributeError: + pass + obj._accessors.discard(attr) + + +class MyAccessor: + def __init__(self, obj) -> None: + self.obj = obj + self.item = "item" + + @property + def prop(self): + return self.item + + def method(self): + return self.item + + +@pytest.mark.parametrize( + "obj, registrar", + [ + (pd.Series, pd.api.extensions.register_series_accessor), + (pd.DataFrame, pd.api.extensions.register_dataframe_accessor), + (pd.Index, pd.api.extensions.register_index_accessor), + ], +) +def test_register(obj, registrar): + with ensure_removed(obj, "mine"): + before = set(dir(obj)) + registrar("mine")(MyAccessor) + o = obj([]) if obj is not pd.Series else obj([], dtype=object) + assert o.mine.prop == "item" + after = set(dir(obj)) + assert (before ^ after) == {"mine"} + assert "mine" in obj._accessors + + +def test_accessor_works(): + with ensure_removed(pd.Series, "mine"): + pd.api.extensions.register_series_accessor("mine")(MyAccessor) + + s = pd.Series([1, 2]) + assert s.mine.obj is s + + assert s.mine.prop == "item" + assert s.mine.method() == "item" + + +def test_overwrite_warns(): + match = r".*MyAccessor.*fake.*Series.*" + with tm.assert_produces_warning(UserWarning, match=match): + with ensure_removed(pd.Series, "fake"): + setattr(pd.Series, "fake", 123) + pd.api.extensions.register_series_accessor("fake")(MyAccessor) + s = pd.Series([1, 2]) + assert s.fake.prop 
== "item" + + +def test_raises_attribute_error(): + with ensure_removed(pd.Series, "bad"): + + @pd.api.extensions.register_series_accessor("bad") + class Bad: + def __init__(self, data) -> None: + raise AttributeError("whoops") + + with pytest.raises(AttributeError, match="whoops"): + pd.Series([], dtype=object).bad diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_sorting.py b/venv/lib/python3.10/site-packages/pandas/tests/test_sorting.py new file mode 100644 index 0000000000000000000000000000000000000000..285f240028152072ed52b3657113d30a7fd63fea --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_sorting.py @@ -0,0 +1,487 @@ +from collections import defaultdict +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + array, + concat, + merge, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort +import pandas.core.common as com +from pandas.core.sorting import ( + _decons_group_index, + get_group_index, + is_int64_overflow_possible, + lexsort_indexer, + nargsort, +) + + +@pytest.fixture +def left_right(): + low, high, n = -1 << 10, 1 << 10, 1 << 20 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG") + ) + left["left"] = left.sum(axis=1) + + # one-2-one match + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + right.columns = right.columns[:-1].tolist() + ["right"] + right.index = np.arange(len(right)) + right["right"] *= -1 + return left, right + + +class TestSorting: + @pytest.mark.slow + def test_int64_overflow(self): + B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) + A = np.arange(2500) + df = DataFrame( + { + "A": A, + "B": B, + "C": A, + "D": B, + "E": A, + "F": B, + "G": A, + "H": B, + "values": np.random.default_rng(2).standard_normal(2500), + } + ) + + lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"]) + rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"]) + + left = lg.sum()["values"] + right = rg.sum()["values"] + + exp_index, _ = left.index.sortlevel() + tm.assert_index_equal(left.index, exp_index) + + exp_index, _ = right.index.sortlevel(0) + tm.assert_index_equal(right.index, exp_index) + + tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values)) + tups = com.asarray_tuplesafe(tups) + + expected = df.groupby(tups).sum()["values"] + + for k, v in expected.items(): + assert left[k] == right[k[::-1]] + assert left[k] == v + assert len(left) == len(right) + + def test_int64_overflow_groupby_large_range(self): + # GH9096 + values = range(55109) + data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values}) + grouped = data.groupby(["a", "b", "c", "d"]) + assert len(grouped) == len(values) + + @pytest.mark.parametrize("agg", ["mean", "median"]) + def test_int64_overflow_groupby_large_df_shuffled(self, agg): + rs = np.random.default_rng(2) + arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5)) + i = rs.choice(len(arr), len(arr) * 4) + arr = np.vstack((arr, arr[i])) # add some duplicate rows + + i = rs.permutation(len(arr)) + arr = arr[i] # shuffle rows + + df = DataFrame(arr, columns=list("abcde")) + df["jim"], df["joe"] = np.zeros((2, len(df))) + gr = df.groupby(list("abcde")) + + # verify this is testing what it is supposed to test! 
+ assert is_int64_overflow_possible(gr._grouper.shape) + + mi = MultiIndex.from_arrays( + [ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)], + names=list("abcde"), + ) + + res = DataFrame( + np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi + ).sort_index() + + tm.assert_frame_equal(getattr(gr, agg)(), res) + + @pytest.mark.parametrize( + "order, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_lexsort_indexer(self, order, na_position, exp): + keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5] + result = lexsort_indexer(keys, orders=order, na_position=na_position) + tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) + + @pytest.mark.parametrize( + "ascending, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_nargsort(self, ascending, na_position, exp): + # list places NaNs last, np.array(..., dtype="O") may not place NaNs first + items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O") + + # mergesort is the most difficult to get right because we want it to be + # stable. 
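        # (Stability matters because the ten NaNs compare equal to each
        # other: a stable sort must preserve their original relative order
        # while moving them to whichever end na_position selects.)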
+ + # According to numpy/core/tests/test_multiarray, """The number of + # sorted items must be greater than ~50 to check the actual algorithm + # because quick and merge sort fall over to insertion sort for small + # arrays.""" + + result = nargsort( + items, kind="mergesort", ascending=ascending, na_position=na_position + ) + tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) + + +class TestMerge: + def test_int64_overflow_outer_merge(self): + # #2690, combinatorial explosion + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G1"], + ) + df2 = DataFrame( + np.random.default_rng(3).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G2"], + ) + result = merge(df1, df2, how="outer") + assert len(result) == 2000 + + @pytest.mark.slow + def test_int64_overflow_check_sum_col(self, left_right): + left, right = left_right + + out = merge(left, right, how="outer") + assert len(out) == len(left) + tm.assert_series_equal(out["left"], -out["right"], check_names=False) + result = out.iloc[:, :-2].sum(axis=1) + tm.assert_series_equal(out["left"], result, check_names=False) + assert result.name is None + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + def test_int64_overflow_how_merge(self, left_right, how): + left, right = left_right + + out = merge(left, right, how="outer") + out.sort_values(out.columns.tolist(), inplace=True) + out.index = np.arange(len(out)) + tm.assert_frame_equal(out, merge(left, right, how=how, sort=True)) + + @pytest.mark.slow + def test_int64_overflow_sort_false_order(self, left_right): + left, right = left_right + + # check that left merge w/ sort=False maintains left frame order + out = merge(left, right, how="left", sort=False) + tm.assert_frame_equal(left, out[left.columns.tolist()]) + + out = merge(right, left, how="left", sort=False) + tm.assert_frame_equal(right, out[right.columns.tolist()]) + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + @pytest.mark.parametrize("sort", [True, False]) + def test_int64_overflow_one_to_many_none_match(self, how, sort): + # one-2-many/none match + low, high, n = -1 << 10, 1 << 10, 1 << 11 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # confirm that this is checking what it is supposed to check + shape = left.apply(Series.nunique).values + assert is_int64_overflow_possible(shape) + + # add duplicates to left frame + left = concat([left, left], ignore_index=True) + + right = DataFrame( + np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # add duplicates & overlap with left to the right frame + i = np.random.default_rng(4).choice(len(left), n) + right = concat([right, right, left.iloc[i]], ignore_index=True) + + left["left"] = np.random.default_rng(2).standard_normal(len(left)) + right["right"] = np.random.default_rng(2).standard_normal(len(right)) + + # shuffle left & right frames + i = np.random.default_rng(5).permutation(len(left)) + left = left.iloc[i].copy() + left.index = np.arange(len(left)) + + i = np.random.default_rng(6).permutation(len(right)) + right = right.iloc[i].copy() + right.index = np.arange(len(right)) + + # manually compute outer merge + ldict, rdict = defaultdict(list), defaultdict(list) + + for idx, row in left.set_index(list("ABCDEFG")).iterrows(): + ldict[idx].append(row["left"]) + + for idx, row in 
right.set_index(list("ABCDEFG")).iterrows(): + rdict[idx].append(row["right"]) + + vals = [] + for k, lval in ldict.items(): + rval = rdict.get(k, [np.nan]) + for lv, rv in product(lval, rval): + vals.append( + k + + ( + lv, + rv, + ) + ) + + for k, rval in rdict.items(): + if k not in ldict: + vals.extend( + k + + ( + np.nan, + rv, + ) + for rv in rval + ) + + def align(df): + df = df.sort_values(df.columns.tolist()) + df.index = np.arange(len(df)) + return df + + out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"]) + out = align(out) + + jmask = { + "left": out["left"].notna(), + "right": out["right"].notna(), + "inner": out["left"].notna() & out["right"].notna(), + "outer": np.ones(len(out), dtype="bool"), + } + + mask = jmask[how] + frame = align(out[mask].copy()) + assert mask.all() ^ mask.any() or how == "outer" + + res = merge(left, right, how=how, sort=sort) + if sort: + kcols = list("ABCDEFG") + tm.assert_frame_equal( + res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort") + ) + + # as in GH9092 dtypes break with outer/right join + # 2021-12-18: dtype does not break anymore + tm.assert_frame_equal(frame, align(res)) + + +@pytest.mark.parametrize( + "codes_list, shape", + [ + [ + [ + np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64), + ], + (4, 5, 6), + ], + [ + [ + np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5), + ], + (10000, 10000), + ], + ], +) +def test_decons(codes_list, shape): + group_index = get_group_index(codes_list, shape, sort=True, xnull=True) + codes_list2 = _decons_group_index(group_index, shape) + + for a, b in zip(codes_list, codes_list2): + tm.assert_numpy_array_equal(a, b) + + +class TestSafeSort: + @pytest.mark.parametrize( + "arg, exp", + [ + [[3, 1, 2, 0, 4], [0, 1, 2, 3, 4]], + [ + np.array(list("baaacb"), dtype=object), + np.array(list("aaabbc"), dtype=object), + ], + [[], []], + ], + ) + def test_basic_sort(self, arg, exp): + result = safe_sort(np.array(arg)) + expected = np.array(exp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + @pytest.mark.parametrize( + "codes, exp_codes", + [ + [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]], + [[], []], + ], + ) + def test_codes(self, verify, codes, exp_codes): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + result, result_codes = safe_sort( + values, codes, use_na_sentinel=True, verify=verify + ) + expected_codes = np.array(exp_codes, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_codes_out_of_bound(self): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + # out of bound indices + codes = [0, 101, 102, 2, 3, 0, 99, 4] + result, result_codes = safe_sort(values, codes, use_na_sentinel=True) + expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_mixed_integer(self): + values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object) + result = safe_sort(values) + expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_mixed_integer_with_codes(self): + values = np.array(["b", 1, 0, "a"], dtype=object) + codes = [0, 1, 2, 3, 
0, -1, 1] + result, result_codes = safe_sort(values, codes) + expected = np.array([0, 1, "a", "b"], dtype=object) + expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_unsortable(self): + # GH 13714 + arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + safe_sort(arr) + + @pytest.mark.parametrize( + "arg, codes, err, msg", + [ + [1, None, TypeError, "Only np.ndarray, ExtensionArray, and Index"], + [np.array([0, 1, 2]), 1, TypeError, "Only list-like objects or None"], + [np.array([0, 1, 2, 1]), [0, 1], ValueError, "values should be unique"], + ], + ) + def test_exceptions(self, arg, codes, err, msg): + with pytest.raises(err, match=msg): + safe_sort(values=arg, codes=codes) + + @pytest.mark.parametrize( + "arg, exp", [[[1, 3, 2], [1, 2, 3]], [[1, 3, np.nan, 2], [1, 2, 3, np.nan]]] + ) + def test_extension_array(self, arg, exp): + a = array(arg, dtype="Int64") + result = safe_sort(a) + expected = array(exp, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + def test_extension_array_codes(self, verify): + a = array([1, 3, 2], dtype="Int64") + result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify) + expected_values = array([1, 2, 3], dtype="Int64") + expected_codes = np.array([0, 2, -1, 1], dtype=np.intp) + tm.assert_extension_array_equal(result, expected_values) + tm.assert_numpy_array_equal(codes, expected_codes) + + +def test_mixed_str_null(nulls_fixture): + values = np.array(["b", nulls_fixture, "a", "b"], dtype=object) + result = safe_sort(values) + expected = np.array(["a", "b", "b", nulls_fixture], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_safe_sort_multiindex(): + # GH#48412 + arr1 = Series([2, 1, NA, NA], dtype="Int64") + arr2 = [2, 1, 3, 3] + midx = MultiIndex.from_arrays([arr1, arr2]) + result = safe_sort(midx) + expected = MultiIndex.from_arrays( + [Series([1, 2, NA, NA], dtype="Int64"), [1, 2, 3, 3]] + ) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/test_take.py b/venv/lib/python3.10/site-packages/pandas/tests/test_take.py new file mode 100644 index 0000000000000000000000000000000000000000..4f34ab34c35f0c2446597001f59526d4c8b0900d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/test_take.py @@ -0,0 +1,307 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs import iNaT + +import pandas._testing as tm +import pandas.core.algorithms as algos + + +@pytest.fixture( + params=[ + (np.int8, np.int16(127), np.int8), + (np.int8, np.int16(128), np.int16), + (np.int32, 1, np.int32), + (np.int32, 2.0, np.float64), + (np.int32, 3.0 + 4.0j, np.complex128), + (np.int32, True, np.object_), + (np.int32, "", np.object_), + (np.float64, 1, np.float64), + (np.float64, 2.0, np.float64), + (np.float64, 3.0 + 4.0j, np.complex128), + (np.float64, True, np.object_), + (np.float64, "", np.object_), + (np.complex128, 1, np.complex128), + (np.complex128, 2.0, np.complex128), + (np.complex128, 3.0 + 4.0j, np.complex128), + (np.complex128, True, np.object_), + (np.complex128, "", np.object_), + (np.bool_, 1, np.object_), + (np.bool_, 2.0, np.object_), + (np.bool_, 3.0 + 4.0j, np.object_), + (np.bool_, True, np.bool_), + (np.bool_, 
"", np.object_), + ] +) +def dtype_fill_out_dtype(request): + return request.param + + +class TestTake: + def test_1d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + data = np.random.default_rng(2).integers(0, 2, 4).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, fill_value=fill_value) + assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all() + assert result[3] == fill_value + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + + result = algos.take_nd(data, indexer, fill_value=fill_value) + assert (result[[0, 1, 2, 3]] == data[indexer]).all() + assert result.dtype == dtype + + def test_2d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + data = np.random.default_rng(2).integers(0, 2, (5, 3)).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all() + assert (result[3, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all() + assert (result[:, 3] == fill_value).all() + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all() + assert result.dtype == dtype + + def test_3d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + + data = np.random.default_rng(2).integers(0, 2, (5, 4, 3)).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all() + assert (result[3, :, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all() + assert (result[:, 3, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) + assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all() + assert (result[:, :, 3] == fill_value).all() + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) + assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all() + assert result.dtype == dtype + + def test_1d_other_dtypes(self): + arr = np.random.default_rng(2).standard_normal(10).astype(np.float32) + + indexer = [1, 2, 3, -1] + result = algos.take_nd(arr, indexer) + expected = arr.take(indexer) + expected[-1] = np.nan + tm.assert_almost_equal(result, expected) + + def test_2d_other_dtypes(self): + arr = np.random.default_rng(2).standard_normal((10, 5)).astype(np.float32) + + indexer = [1, 2, 3, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + expected = 
arr.take(indexer, axis=0) + expected[-1] = np.nan + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected[:, -1] = np.nan + tm.assert_almost_equal(result, expected) + + def test_1d_bool(self): + arr = np.array([0, 1, 0], dtype=bool) + + result = algos.take_nd(arr, [0, 2, 2, 1]) + expected = arr.take([0, 2, 2, 1]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, -1]) + assert result.dtype == np.object_ + + def test_2d_bool(self): + arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool) + + result = algos.take_nd(arr, [0, 2, 2, 1]) + expected = arr.take([0, 2, 2, 1], axis=0) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, 2, 1], axis=1) + expected = arr.take([0, 2, 2, 1], axis=1) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, -1]) + assert result.dtype == np.object_ + + def test_2d_float32(self): + arr = np.random.default_rng(2).standard_normal((4, 3)).astype(np.float32) + indexer = [0, 2, -1, 1, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + + expected = arr.take(indexer, axis=0) + expected[[2, 4], :] = np.nan + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected[:, [2, 4]] = np.nan + tm.assert_almost_equal(result, expected) + + def test_2d_datetime64(self): + # 2005/01/01 - 2006/01/01 + arr = ( + np.random.default_rng(2).integers(11_045_376, 11_360_736, (5, 3)) + * 100_000_000_000 + ) + arr = arr.view(dtype="datetime64[ns]") + indexer = [0, 2, -1, 1, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + expected = arr.take(indexer, axis=0) + expected.view(np.int64)[[2, 4], :] = iNaT + tm.assert_almost_equal(result, expected) + + result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1)) + expected = arr.take(indexer, axis=0) + expected[[2, 4], :] = datetime(2007, 1, 1) + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected.view(np.int64)[:, [2, 4]] = iNaT + tm.assert_almost_equal(result, expected) + + result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1)) + expected = arr.take(indexer, axis=1) + expected[:, [2, 4]] = datetime(2007, 1, 1) + tm.assert_almost_equal(result, expected) + + def test_take_axis_0(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1]) + expected = np.array([[0, 1, 2], [9, 10, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0) + expected = np.array([[0, 1, 2], [0, 0, 0]]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_axis_1(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1], axis=1) + expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0) + expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]]) + tm.assert_numpy_array_equal(result, expected) + + # GH#26976 make sure we validate along the correct axis + with pytest.raises(IndexError, match="indices are out-of-bounds"): + algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0) + + def test_take_non_hashable_fill_value(self): + 
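        # For a non-object dtype the fill value has to be a scalar so it can
        # be broadcast into the (possibly upcast) result array; an object
        # dtype can hold arbitrary Python objects, so a list fill_value is
        # accepted in the second half of this test.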
arr = np.array([1, 2, 3]) + indexer = np.array([1, -1]) + with pytest.raises(ValueError, match="fill_value must be a scalar"): + algos.take(arr, indexer, allow_fill=True, fill_value=[1]) + + # with object dtype it is allowed + arr = np.array([1, 2, 3], dtype=object) + result = algos.take(arr, indexer, allow_fill=True, fill_value=[1]) + expected = np.array([2, [1]], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestExtensionTake: + # The take method found in pd.api.extensions + + def test_bounds_check_large(self): + arr = np.array([1, 2]) + + msg = "indices are out-of-bounds" + with pytest.raises(IndexError, match=msg): + algos.take(arr, [2, 3], allow_fill=True) + + msg = "index 2 is out of bounds for( axis 0 with)? size 2" + with pytest.raises(IndexError, match=msg): + algos.take(arr, [2, 3], allow_fill=False) + + def test_bounds_check_small(self): + arr = np.array([1, 2, 3], dtype=np.int64) + indexer = [0, -1, -2] + + msg = r"'indices' contains values less than allowed \(-2 < -1\)" + with pytest.raises(ValueError, match=msg): + algos.take(arr, indexer, allow_fill=True) + + result = algos.take(arr, indexer) + expected = np.array([1, 3, 2], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("allow_fill", [True, False]) + def test_take_empty(self, allow_fill): + arr = np.array([], dtype=np.int64) + # empty take is ok + result = algos.take(arr, [], allow_fill=allow_fill) + tm.assert_numpy_array_equal(arr, result) + + msg = "|".join( + [ + "cannot do a non-empty take from an empty axes.", + "indices are out-of-bounds", + ] + ) + with pytest.raises(IndexError, match=msg): + algos.take(arr, [0], allow_fill=allow_fill) + + def test_take_na_empty(self): + result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0) + expected = np.array([0.0, 0.0]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_coerces_list(self): + arr = [1, 2, 3] + msg = "take accepting non-standard inputs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.take(arr, [0, 0]) + expected = np.array([1, 1]) + tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..38438c121a8731fcf91c7cb6cb268baccf24fc4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014-2022 Matthew Brennan Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3f2fd71bbe71d450630269664cdb269151ad9b1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA @@ -0,0 +1,27 @@ +Metadata-Version: 2.1 +Name: py-cpuinfo +Version: 9.0.0 +Summary: Get CPU info with pure Python +Home-page: https://github.com/workhorsy/py-cpuinfo +Author: Matthew Brennan Jones +Author-email: matthew.brennan.jones@gmail.com +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Topic :: Utilities +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +License-File: LICENSE + +py-cpuinfo +========== + + +Py-cpuinfo gets CPU info with pure Python. Py-cpuinfo should work +without any extra programs or libraries, beyond what your OS provides. +It does not require any compilation(C/C++, assembly, et cetera) to use. +It works with Python 3. + +Documentation can be viewed here: https://github.com/workhorsy/py-cpuinfo + + diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d7991170dc2c1683a273406f6cdb550f78433108 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD @@ -0,0 +1,14 @@ +../../../bin/cpuinfo,sha256=bfyPJX9WO8IdvrCJ2TCkhb_tdrDwxu9w_UmN5KYcZSw,235 +cpuinfo/__init__.py,sha256=T6gndqGAggfJCu4_iOziTnomCN7KzaAK_OYTewE4FMA,44 +cpuinfo/__main__.py,sha256=nSxC6Hqhi-0lN7Z4WwtKdxQdf3cUJefb5hOahCzh4Yg,33 +cpuinfo/__pycache__/__init__.cpython-310.pyc,, +cpuinfo/__pycache__/__main__.cpython-310.pyc,, +cpuinfo/__pycache__/cpuinfo.cpython-310.pyc,, +cpuinfo/cpuinfo.py,sha256=HHyDlDUNovE3QzJ3hviiM1ngyOC4iD7i6oGiz2iTmVk,84388 +py_cpuinfo-9.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py_cpuinfo-9.0.0.dist-info/LICENSE,sha256=3br3Y5a_XHqkWXWiHq_i4i7st9paoNt8sOYVL6r-800,1127 +py_cpuinfo-9.0.0.dist-info/METADATA,sha256=rRFelvhFdoYcXnXXYDAbgdIxQ8_iVUa5lUHgEmU3ncE,794 +py_cpuinfo-9.0.0.dist-info/RECORD,, +py_cpuinfo-9.0.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +py_cpuinfo-9.0.0.dist-info/entry_points.txt,sha256=ZwrsclY_xUA0xJZK98bLxBdcowxnkK0ANYUT4FYcZJ8,42 +py_cpuinfo-9.0.0.dist-info/top_level.txt,sha256=XsjpunhkxD4hvznqQjrFNw0rtgizHEOGzewPZY3UEtU,8 diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: 
py3-none-any + diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..c10718f4d497f1e333eaec47651ab41f5d196efc --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +cpuinfo = cpuinfo:main + diff --git a/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..b53b02d61061b32d70bf375f63e0e5d3ee8d4a1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +cpuinfo
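The sha256= tokens in the RECORD file above are not hex digests: per the wheel spec they are the urlsafe-base64 encoding of the raw SHA-256 digest with the trailing '=' padding stripped, followed by the file size in bytes. A minimal sketch of recomputing such an entry for verification (the path is illustrative):

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: Path) -> str:
        # Hash the file the way pip writes RECORD entries: urlsafe base64
        # of the raw sha256 digest, with '=' padding removed.
        data = path.read_bytes()
        b64 = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return f"{path},sha256={b64.rstrip(b'=').decode('ascii')},{len(data)}"

    # record_entry(Path("cpuinfo/__init__.py")) should reproduce the
    # corresponding line of py_cpuinfo-9.0.0.dist-info/RECORD.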